path: root/deps/v8/src/maglev
author     Michaël Zasso <targos@protonmail.com>  2022-04-12 11:10:15 +0200
committer  Michaël Zasso <targos@protonmail.com>  2022-04-12 22:08:39 +0200
commit     fd4f80ce54d7f7b7503e0999f6a9d293d493846d (patch)
tree       00fba34b8aabeb481c7128fccee635719ee44a3b /deps/v8/src/maglev
parent     73d53fe9f56d7ce5de4b9c9ad5257dc601bbce14 (diff)
download   node-new-fd4f80ce54d7f7b7503e0999f6a9d293d493846d.tar.gz
deps: update V8 to 10.1.124.6
PR-URL: https://github.com/nodejs/node/pull/42657
Reviewed-By: Darshan Sen <raisinten@gmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Michael Dawson <midawson@redhat.com>
Diffstat (limited to 'deps/v8/src/maglev')
-rw-r--r--  deps/v8/src/maglev/DEPS | 6
-rw-r--r--  deps/v8/src/maglev/OWNERS | 3
-rw-r--r--  deps/v8/src/maglev/maglev-basic-block.h | 107
-rw-r--r--  deps/v8/src/maglev/maglev-code-gen-state.h | 135
-rw-r--r--  deps/v8/src/maglev/maglev-code-generator.cc | 378
-rw-r--r--  deps/v8/src/maglev/maglev-code-generator.h | 27
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-info.cc | 123
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-info.h | 137
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-unit.cc | 45
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-unit.h | 57
-rw-r--r--  deps/v8/src/maglev/maglev-compiler.cc | 209
-rw-r--r--  deps/v8/src/maglev/maglev-compiler.h | 53
-rw-r--r--  deps/v8/src/maglev/maglev-concurrent-dispatcher.cc | 194
-rw-r--r--  deps/v8/src/maglev/maglev-concurrent-dispatcher.h | 92
-rw-r--r--  deps/v8/src/maglev/maglev-graph-builder.cc | 616
-rw-r--r--  deps/v8/src/maglev/maglev-graph-builder.h | 383
-rw-r--r--  deps/v8/src/maglev/maglev-graph-labeller.h | 65
-rw-r--r--  deps/v8/src/maglev/maglev-graph-printer.cc | 446
-rw-r--r--  deps/v8/src/maglev/maglev-graph-printer.h | 85
-rw-r--r--  deps/v8/src/maglev/maglev-graph-processor.h | 423
-rw-r--r--  deps/v8/src/maglev/maglev-graph.h | 60
-rw-r--r--  deps/v8/src/maglev/maglev-interpreter-frame-state.h | 400
-rw-r--r--  deps/v8/src/maglev/maglev-ir.cc | 922
-rw-r--r--  deps/v8/src/maglev/maglev-ir.h | 1461
-rw-r--r--  deps/v8/src/maglev/maglev-regalloc-data.h | 83
-rw-r--r--  deps/v8/src/maglev/maglev-regalloc.cc | 875
-rw-r--r--  deps/v8/src/maglev/maglev-regalloc.h | 112
-rw-r--r--  deps/v8/src/maglev/maglev-register-frame-array.h | 113
-rw-r--r--  deps/v8/src/maglev/maglev-vreg-allocator.h | 57
-rw-r--r--  deps/v8/src/maglev/maglev.cc | 24
-rw-r--r--  deps/v8/src/maglev/maglev.h | 28
31 files changed, 7719 insertions, 0 deletions
diff --git a/deps/v8/src/maglev/DEPS b/deps/v8/src/maglev/DEPS
new file mode 100644
index 0000000000..f3fa2ecc66
--- /dev/null
+++ b/deps/v8/src/maglev/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ # Allow Maglev to depend on TurboFan data structures.
+ # TODO(v8:7700): Clean up these dependencies by extracting common code to a
+ # separate directory.
+ "+src/compiler",
+]
diff --git a/deps/v8/src/maglev/OWNERS b/deps/v8/src/maglev/OWNERS
new file mode 100644
index 0000000000..dca7476a04
--- /dev/null
+++ b/deps/v8/src/maglev/OWNERS
@@ -0,0 +1,3 @@
+leszeks@chromium.org
+jgruber@chromium.org
+verwaest@chromium.org
diff --git a/deps/v8/src/maglev/maglev-basic-block.h b/deps/v8/src/maglev/maglev-basic-block.h
new file mode 100644
index 0000000000..f6222944fe
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-basic-block.h
@@ -0,0 +1,107 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
+#define V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
+
+#include <vector>
+
+#include "src/codegen/label.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+using NodeIterator = Node::List::Iterator;
+using NodeConstIterator = Node::List::Iterator;
+
+class BasicBlock {
+ public:
+ explicit BasicBlock(MergePointInterpreterFrameState* state)
+ : control_node_(nullptr), state_(state) {}
+
+ uint32_t first_id() const {
+ if (has_phi()) return phis()->first()->id();
+ return nodes_.is_empty() ? control_node()->id() : nodes_.first()->id();
+ }
+
+ uint32_t FirstNonGapMoveId() const {
+ if (has_phi()) return phis()->first()->id();
+ if (!nodes_.is_empty()) {
+ for (const Node* node : nodes_) {
+ if (node->Is<GapMove>()) continue;
+ return node->id();
+ }
+ }
+ return control_node()->id();
+ }
+
+ Node::List& nodes() { return nodes_; }
+
+ ControlNode* control_node() const { return control_node_; }
+ void set_control_node(ControlNode* control_node) {
+ DCHECK_NULL(control_node_);
+ control_node_ = control_node;
+ }
+
+ bool has_phi() const { return has_state() && state_->has_phi(); }
+
+ bool is_empty_block() const { return is_empty_block_; }
+
+ BasicBlock* empty_block_predecessor() const {
+ DCHECK(is_empty_block());
+ return empty_block_predecessor_;
+ }
+
+ void set_empty_block_predecessor(BasicBlock* predecessor) {
+ DCHECK(nodes_.is_empty());
+ DCHECK(control_node()->Is<Jump>());
+ DCHECK_NULL(state_);
+ is_empty_block_ = true;
+ empty_block_predecessor_ = predecessor;
+ }
+
+ Phi::List* phis() const {
+ DCHECK(has_phi());
+ return state_->phis();
+ }
+
+ BasicBlock* predecessor_at(int i) const {
+ DCHECK_NOT_NULL(state_);
+ return state_->predecessor_at(i);
+ }
+
+ int predecessor_id() const {
+ return control_node()->Cast<UnconditionalControlNode>()->predecessor_id();
+ }
+ void set_predecessor_id(int id) {
+ control_node()->Cast<UnconditionalControlNode>()->set_predecessor_id(id);
+ }
+
+ Label* label() { return &label_; }
+ MergePointInterpreterFrameState* state() const {
+ DCHECK(has_state());
+ return state_;
+ }
+ bool has_state() const { return state_ != nullptr && !is_empty_block(); }
+
+ private:
+ bool is_empty_block_ = false;
+ Node::List nodes_;
+ ControlNode* control_node_;
+ union {
+ MergePointInterpreterFrameState* state_;
+ BasicBlock* empty_block_predecessor_;
+ };
+ Label label_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
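
BasicBlock above overlays state_ and empty_block_predecessor_ in an anonymous union, discriminated by the is_empty_block_ flag (has_state() additionally checks for a null state). A minimal standalone sketch of the same pattern, with placeholder types rather than the real Maglev classes:

#include <cassert>

struct MergeState {};  // placeholder for MergePointInterpreterFrameState

class Block {
 public:
  explicit Block(MergeState* state) : state_(state) {}

  bool has_state() const { return !is_empty_block_ && state_ != nullptr; }

  void set_empty_block_predecessor(Block* predecessor) {
    assert(state_ == nullptr);  // mirrors the DCHECK_NULL(state_) above
    is_empty_block_ = true;
    empty_block_predecessor_ = predecessor;
  }

  Block* empty_block_predecessor() const {
    assert(is_empty_block_);
    return empty_block_predecessor_;
  }

 private:
  bool is_empty_block_ = false;
  union {
    MergeState* state_;               // live when !is_empty_block_
    Block* empty_block_predecessor_;  // live when is_empty_block_
  };
};
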
diff --git a/deps/v8/src/maglev/maglev-code-gen-state.h b/deps/v8/src/maglev/maglev-code-gen-state.h
new file mode 100644
index 0000000000..ecf8bbccda
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-code-gen-state.h
@@ -0,0 +1,135 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
+#define V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/label.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/common/globals.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevCodeGenState {
+ public:
+ class DeferredCodeInfo {
+ public:
+ virtual void Generate(MaglevCodeGenState* code_gen_state,
+ Label* return_label) = 0;
+ Label deferred_code_label;
+ Label return_label;
+ };
+
+ MaglevCodeGenState(MaglevCompilationUnit* compilation_unit,
+ SafepointTableBuilder* safepoint_table_builder)
+ : compilation_unit_(compilation_unit),
+ safepoint_table_builder_(safepoint_table_builder),
+ masm_(isolate(), CodeObjectRequired::kNo) {}
+
+ void SetVregSlots(int slots) { vreg_slots_ = slots; }
+
+ void PushDeferredCode(DeferredCodeInfo* deferred_code) {
+ deferred_code_.push_back(deferred_code);
+ }
+ void EmitDeferredCode() {
+ for (auto& deferred_code : deferred_code_) {
+ masm()->RecordComment("-- Deferred block");
+ masm()->bind(&deferred_code->deferred_code_label);
+ deferred_code->Generate(this, &deferred_code->return_label);
+ masm()->int3();
+ }
+ }
+
+ compiler::NativeContextRef native_context() const {
+ return broker()->target_native_context();
+ }
+ Isolate* isolate() const { return compilation_unit_->isolate(); }
+ int parameter_count() const { return compilation_unit_->parameter_count(); }
+ int register_count() const { return compilation_unit_->register_count(); }
+ const compiler::BytecodeAnalysis& bytecode_analysis() const {
+ return compilation_unit_->bytecode_analysis();
+ }
+ compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
+ const compiler::BytecodeArrayRef& bytecode() const {
+ return compilation_unit_->bytecode();
+ }
+ MaglevGraphLabeller* graph_labeller() const {
+ return compilation_unit_->graph_labeller();
+ }
+ MacroAssembler* masm() { return &masm_; }
+ int vreg_slots() const { return vreg_slots_; }
+ SafepointTableBuilder* safepoint_table_builder() const {
+ return safepoint_table_builder_;
+ }
+ MaglevCompilationUnit* compilation_unit() const { return compilation_unit_; }
+
+ // TODO(v8:7700): Clean up after all code paths are supported.
+ void set_found_unsupported_code_paths(bool val) {
+ found_unsupported_code_paths_ = val;
+ }
+ bool found_unsupported_code_paths() const {
+ return found_unsupported_code_paths_;
+ }
+
+ private:
+ MaglevCompilationUnit* const compilation_unit_;
+ SafepointTableBuilder* const safepoint_table_builder_;
+
+ MacroAssembler masm_;
+ std::vector<DeferredCodeInfo*> deferred_code_;
+ int vreg_slots_ = 0;
+
+ // Allow marking some codegen paths as unsupported, so that we can test maglev
+ // incrementally.
+ // TODO(v8:7700): Clean up after all code paths are supported.
+ bool found_unsupported_code_paths_ = false;
+};
+
+// Some helpers for codegen.
+// TODO(leszeks): consider moving this to a separate header.
+
+inline MemOperand GetStackSlot(int index) {
+ return MemOperand(rbp, StandardFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize);
+}
+
+inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
+ return GetStackSlot(operand.index());
+}
+
+inline Register ToRegister(const compiler::InstructionOperand& operand) {
+ return compiler::AllocatedOperand::cast(operand).GetRegister();
+}
+
+inline Register ToRegister(const ValueLocation& location) {
+ return ToRegister(location.operand());
+}
+
+inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand) {
+ return GetStackSlot(compiler::AllocatedOperand::cast(operand));
+}
+
+inline MemOperand ToMemOperand(const ValueLocation& location) {
+ return ToMemOperand(location.operand());
+}
+
+inline int GetSafepointIndexForStackSlot(int i) {
+ // Safepoint tables also contain slots for all fixed frame slots (both
+ // above and below the fp).
+ return StandardFrameConstants::kFixedSlotCount + i;
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
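
The helpers above address vreg spill slots relative to rbp and map slot indices into safepoint-table indices. A rough, self-contained illustration of that arithmetic; the constants below are assumed placeholders (the real values come from StandardFrameConstants and depend on V8's frame layout):

#include <cstdio>

constexpr int kSystemPointerSize = 8;                               // x64
constexpr int kAssumedExpressionsOffset = -3 * kSystemPointerSize;  // placeholder
constexpr int kAssumedFixedSlotCount = 3;                           // placeholder

// Mirrors GetStackSlot(): slot 0 sits at the expressions offset, and each
// further slot is one pointer lower on the stack.
int StackSlotByteOffsetFromFp(int index) {
  return kAssumedExpressionsOffset - index * kSystemPointerSize;
}

// Mirrors GetSafepointIndexForStackSlot(): safepoint tables also cover the
// fixed frame slots, so vreg slot i maps to kFixedSlotCount + i.
int SafepointIndexForStackSlot(int i) { return kAssumedFixedSlotCount + i; }

int main() {
  for (int i = 0; i < 3; ++i) {
    std::printf("vreg slot %d -> [rbp%+d], safepoint index %d\n", i,
                StackSlotByteOffsetFromFp(i), SafepointIndexForStackSlot(i));
  }
  // With the assumed constants: [rbp-24]/3, [rbp-32]/4, [rbp-40]/5.
}
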
diff --git a/deps/v8/src/maglev/maglev-code-generator.cc b/deps/v8/src/maglev/maglev-code-generator.cc
new file mode 100644
index 0000000000..f578d53777
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-code-generator.cc
@@ -0,0 +1,378 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-code-generator.h"
+
+#include "src/codegen/code-desc.h"
+#include "src/codegen/register.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/maglev/maglev-code-gen-state.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-printer.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc-data.h"
+
+namespace v8 {
+namespace internal {
+
+namespace maglev {
+
+#define __ masm()->
+
+namespace {
+
+template <typename T, size_t... Is>
+std::array<T, sizeof...(Is)> repeat(T value, std::index_sequence<Is...>) {
+ return {((void)Is, value)...};
+}
+
+template <size_t N, typename T>
+std::array<T, N> repeat(T value) {
+ return repeat<T>(value, std::make_index_sequence<N>());
+}
+
+using RegisterMoves = std::array<Register, Register::kNumRegisters>;
+using StackToRegisterMoves =
+ std::array<compiler::InstructionOperand, Register::kNumRegisters>;
+
+class MaglevCodeGeneratingNodeProcessor {
+ public:
+ static constexpr bool kNeedsCheckpointStates = true;
+
+ explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
+ : code_gen_state_(code_gen_state) {}
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {
+ if (FLAG_maglev_break_on_entry) {
+ __ int3();
+ }
+
+ __ EnterFrame(StackFrame::BASELINE);
+
+ // Save arguments in frame.
+ // TODO(leszeks): Consider eliding this frame if we don't make any calls
+ // that could clobber these registers.
+ __ Push(kContextRegister);
+ __ Push(kJSFunctionRegister); // Callee's JS function.
+ __ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
+
+ // Extend rsp by the size of the frame.
+ code_gen_state_->SetVregSlots(graph->stack_slots());
+ __ subq(rsp, Immediate(code_gen_state_->vreg_slots() * kSystemPointerSize));
+
+ // Initialize stack slots.
+ // TODO(jgruber): Update logic once the register allocator is further along.
+ {
+ ASM_CODE_COMMENT_STRING(masm(), "Initializing stack slots");
+ __ Move(rax, Immediate(0));
+ __ Move(rcx, Immediate(code_gen_state_->vreg_slots()));
+ __ leaq(rdi, GetStackSlot(code_gen_state_->vreg_slots() - 1));
+ __ repstosq();
+ }
+
+ // We don't emit proper safepoint data yet; instead, define a single
+ // safepoint at the end of the code object, with all-tagged stack slots.
+ // TODO(jgruber): Real safepoint handling.
+ SafepointTableBuilder::Safepoint safepoint =
+ safepoint_table_builder()->DefineSafepoint(masm());
+ for (int i = 0; i < code_gen_state_->vreg_slots(); i++) {
+ safepoint.DefineTaggedStackSlot(GetSafepointIndexForStackSlot(i));
+ }
+ }
+
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
+ code_gen_state_->EmitDeferredCode();
+ }
+
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {
+ if (FLAG_code_comments) {
+ std::stringstream ss;
+ ss << "-- Block b" << graph_labeller()->BlockId(block);
+ __ RecordComment(ss.str());
+ }
+
+ __ bind(block->label());
+ }
+
+ template <typename NodeT>
+ void Process(NodeT* node, const ProcessingState& state) {
+ if (FLAG_code_comments) {
+ std::stringstream ss;
+ ss << "-- " << graph_labeller()->NodeId(node) << ": "
+ << PrintNode(graph_labeller(), node);
+ __ RecordComment(ss.str());
+ }
+
+ // Emit Phi moves before visiting the control node.
+ if (std::is_base_of<UnconditionalControlNode, NodeT>::value) {
+ EmitBlockEndGapMoves(node->template Cast<UnconditionalControlNode>(),
+ state);
+ }
+
+ node->GenerateCode(code_gen_state_, state);
+
+ if (std::is_base_of<ValueNode, NodeT>::value) {
+ ValueNode* value_node = node->template Cast<ValueNode>();
+ if (value_node->is_spilled()) {
+ compiler::AllocatedOperand source =
+ compiler::AllocatedOperand::cast(value_node->result().operand());
+ // We shouldn't spill nodes which already output to the stack.
+ if (!source.IsStackSlot()) {
+ if (FLAG_code_comments) __ RecordComment("-- Spill:");
+ DCHECK(!source.IsStackSlot());
+ __ movq(GetStackSlot(value_node->spill_slot()), ToRegister(source));
+ } else {
+ // Otherwise, the result source stack slot should be equal to the
+ // spill slot.
+ DCHECK_EQ(source.index(), value_node->spill_slot().index());
+ }
+ }
+ }
+ }
+
+ void EmitSingleParallelMove(Register source, Register target,
+ RegisterMoves& moves) {
+ DCHECK(!moves[target.code()].is_valid());
+ __ movq(target, source);
+ moves[source.code()] = Register::no_reg();
+ }
+
+ bool RecursivelyEmitParallelMoveChain(Register chain_start, Register source,
+ Register target, RegisterMoves& moves) {
+ if (target == chain_start) {
+ // The target of this move is the start of the move chain -- this
+ // means that there is a cycle, and we have to break it by moving
+ // the chain start into a temporary.
+
+ __ RecordComment("-- * Cycle");
+ EmitSingleParallelMove(target, kScratchRegister, moves);
+ EmitSingleParallelMove(source, target, moves);
+ return true;
+ }
+ bool is_cycle = false;
+ if (moves[target.code()].is_valid()) {
+ is_cycle = RecursivelyEmitParallelMoveChain(chain_start, target,
+ moves[target.code()], moves);
+ } else {
+ __ RecordComment("-- * Chain start");
+ }
+ if (is_cycle && source == chain_start) {
+ EmitSingleParallelMove(kScratchRegister, target, moves);
+ __ RecordComment("-- * end cycle");
+ } else {
+ EmitSingleParallelMove(source, target, moves);
+ }
+ return is_cycle;
+ }
+
+ void EmitParallelMoveChain(Register source, RegisterMoves& moves) {
+ Register target = moves[source.code()];
+ if (!target.is_valid()) return;
+
+ DCHECK_NE(source, target);
+ RecursivelyEmitParallelMoveChain(source, source, target, moves);
+ }
+
+ void EmitStackToRegisterGapMove(compiler::InstructionOperand source,
+ Register target) {
+ if (!source.IsAllocated()) return;
+ __ movq(target, GetStackSlot(compiler::AllocatedOperand::cast(source)));
+ }
+
+ void RecordGapMove(compiler::AllocatedOperand source, Register target_reg,
+ RegisterMoves& register_moves,
+ StackToRegisterMoves& stack_to_register_moves) {
+ if (source.IsStackSlot()) {
+ // For stack->reg moves, don't emit the move yet, but instead record the
+ // move in the set of stack-to-register moves, to be executed after the
+ // reg->reg parallel moves.
+ stack_to_register_moves[target_reg.code()] = source;
+ } else {
+ // For reg->reg moves, don't emit the move yet, but instead record the
+ // move in the set of parallel register moves, to be resolved later.
+ Register source_reg = ToRegister(source);
+ if (target_reg != source_reg) {
+ DCHECK(!register_moves[source_reg.code()].is_valid());
+ register_moves[source_reg.code()] = target_reg;
+ }
+ }
+ }
+
+ void RecordGapMove(compiler::AllocatedOperand source,
+ compiler::AllocatedOperand target,
+ RegisterMoves& register_moves,
+ StackToRegisterMoves& stack_to_register_moves) {
+ if (target.IsRegister()) {
+ RecordGapMove(source, ToRegister(target), register_moves,
+ stack_to_register_moves);
+ return;
+ }
+
+ // stack->stack and reg->stack moves should be executed before registers are
+ // clobbered by reg->reg or stack->reg, so emit them immediately.
+ if (source.IsRegister()) {
+ Register source_reg = ToRegister(source);
+ __ movq(GetStackSlot(target), source_reg);
+ } else {
+ __ movq(kScratchRegister, GetStackSlot(source));
+ __ movq(GetStackSlot(target), kScratchRegister);
+ }
+ }
+
+ void EmitBlockEndGapMoves(UnconditionalControlNode* node,
+ const ProcessingState& state) {
+ BasicBlock* target = node->target();
+ if (!target->has_state()) {
+ __ RecordComment("-- Target has no state, must be a fallthrough");
+ return;
+ }
+
+ int predecessor_id = state.block()->predecessor_id();
+
+ // Save register moves in an array, so that we can resolve them as parallel
+ // moves. Note that the mapping is:
+ //
+ // register_moves[source] = target.
+ RegisterMoves register_moves =
+ repeat<Register::kNumRegisters>(Register::no_reg());
+
+ // Save stack to register moves in an array, so that we can execute them
+ // after the parallel moves have read the register values. Note that the
+ // mapping is:
+ //
+ // stack_to_register_moves[target] = source.
+ StackToRegisterMoves stack_to_register_moves;
+
+ __ RecordComment("-- Gap moves:");
+
+ for (auto entry : target->state()->register_state()) {
+ RegisterMerge* merge;
+ if (LoadMergeState(entry.state, &merge)) {
+ compiler::AllocatedOperand source = merge->operand(predecessor_id);
+ Register target_reg = entry.reg;
+
+ if (FLAG_code_comments) {
+ std::stringstream ss;
+ ss << "-- * " << source << " → " << target_reg;
+ __ RecordComment(ss.str());
+ }
+ RecordGapMove(source, target_reg, register_moves,
+ stack_to_register_moves);
+ }
+ }
+
+ if (target->has_phi()) {
+ Phi::List* phis = target->phis();
+ for (Phi* phi : *phis) {
+ compiler::AllocatedOperand source = compiler::AllocatedOperand::cast(
+ phi->input(state.block()->predecessor_id()).operand());
+ compiler::AllocatedOperand target =
+ compiler::AllocatedOperand::cast(phi->result().operand());
+ if (FLAG_code_comments) {
+ std::stringstream ss;
+ ss << "-- * " << source << " → " << target << " (n"
+ << graph_labeller()->NodeId(phi) << ")";
+ __ RecordComment(ss.str());
+ }
+ RecordGapMove(source, target, register_moves, stack_to_register_moves);
+ }
+ }
+
+#define EMIT_MOVE_FOR_REG(Name) EmitParallelMoveChain(Name, register_moves);
+ ALLOCATABLE_GENERAL_REGISTERS(EMIT_MOVE_FOR_REG)
+#undef EMIT_MOVE_FOR_REG
+
+#define EMIT_MOVE_FOR_REG(Name) \
+ EmitStackToRegisterGapMove(stack_to_register_moves[Name.code()], Name);
+ ALLOCATABLE_GENERAL_REGISTERS(EMIT_MOVE_FOR_REG)
+#undef EMIT_MOVE_FOR_REG
+ }
+
+ Isolate* isolate() const { return code_gen_state_->isolate(); }
+ MacroAssembler* masm() const { return code_gen_state_->masm(); }
+ MaglevGraphLabeller* graph_labeller() const {
+ return code_gen_state_->graph_labeller();
+ }
+ SafepointTableBuilder* safepoint_table_builder() const {
+ return code_gen_state_->safepoint_table_builder();
+ }
+
+ private:
+ MaglevCodeGenState* code_gen_state_;
+};
+
+} // namespace
+
+class MaglevCodeGeneratorImpl final {
+ public:
+ static MaybeHandle<Code> Generate(MaglevCompilationUnit* compilation_unit,
+ Graph* graph) {
+ return MaglevCodeGeneratorImpl(compilation_unit, graph).Generate();
+ }
+
+ private:
+ MaglevCodeGeneratorImpl(MaglevCompilationUnit* compilation_unit, Graph* graph)
+ : safepoint_table_builder_(compilation_unit->zone()),
+ code_gen_state_(compilation_unit, safepoint_table_builder()),
+ processor_(compilation_unit, &code_gen_state_),
+ graph_(graph) {}
+
+ MaybeHandle<Code> Generate() {
+ EmitCode();
+ if (code_gen_state_.found_unsupported_code_paths()) return {};
+ EmitMetadata();
+ return BuildCodeObject();
+ }
+
+ void EmitCode() { processor_.ProcessGraph(graph_); }
+
+ void EmitMetadata() {
+ // Final alignment before starting on the metadata section.
+ masm()->Align(Code::kMetadataAlignment);
+
+ safepoint_table_builder()->Emit(masm(),
+ stack_slot_count_with_fixed_frame());
+ }
+
+ MaybeHandle<Code> BuildCodeObject() {
+ CodeDesc desc;
+ static constexpr int kNoHandlerTableOffset = 0;
+ masm()->GetCode(isolate(), &desc, safepoint_table_builder(),
+ kNoHandlerTableOffset);
+ return Factory::CodeBuilder{isolate(), desc, CodeKind::MAGLEV}
+ .set_stack_slots(stack_slot_count_with_fixed_frame())
+ .TryBuild();
+ }
+
+ int stack_slot_count() const { return code_gen_state_.vreg_slots(); }
+ int stack_slot_count_with_fixed_frame() const {
+ return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
+ }
+
+ Isolate* isolate() const {
+ return code_gen_state_.compilation_unit()->isolate();
+ }
+ MacroAssembler* masm() { return code_gen_state_.masm(); }
+ SafepointTableBuilder* safepoint_table_builder() {
+ return &safepoint_table_builder_;
+ }
+
+ SafepointTableBuilder safepoint_table_builder_;
+ MaglevCodeGenState code_gen_state_;
+ GraphProcessor<MaglevCodeGeneratingNodeProcessor> processor_;
+ Graph* const graph_;
+};
+
+// static
+MaybeHandle<Code> MaglevCodeGenerator::Generate(
+ MaglevCompilationUnit* compilation_unit, Graph* graph) {
+ return MaglevCodeGeneratorImpl::Generate(compilation_unit, graph);
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
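
RecursivelyEmitParallelMoveChain above resolves the register_moves[source] = target mapping by first emitting whatever pending move currently occupies the target (following the chain), and by breaking cycles through kScratchRegister. A standalone sketch of the same idea, with plain ints standing in for V8's Register type, -1 for no_reg, and printf standing in for the assembler (all names are illustrative):

#include <array>
#include <cstdio>

constexpr int kNumRegs = 8;
constexpr int kNoReg = -1;
constexpr int kScratch = kNumRegs;  // pretend scratch register

using Moves = std::array<int, kNumRegs>;  // moves[source] = target

void EmitMove(int source, int target, Moves& moves) {
  std::printf("mov r%d <- r%d\n", target, source);
  if (source < kNumRegs) moves[source] = kNoReg;  // this source is now done
}

// Returns true if the chain starting at chain_start turned out to be a cycle.
bool EmitChain(int chain_start, int source, int target, Moves& moves) {
  if (target == chain_start) {
    // Cycle: park the chain start in the scratch register, then overwrite it.
    EmitMove(target, kScratch, moves);
    EmitMove(source, target, moves);
    return true;
  }
  bool is_cycle = false;
  if (moves[target] != kNoReg) {
    // The target is itself the source of a pending move; emit that one first.
    is_cycle = EmitChain(chain_start, target, moves[target], moves);
  }
  if (is_cycle && source == chain_start) {
    EmitMove(kScratch, target, moves);  // close the cycle from the scratch reg
  } else {
    EmitMove(source, target, moves);
  }
  return is_cycle;
}

int main() {
  Moves moves;
  moves.fill(kNoReg);
  // Swap r0 <-> r1 (a cycle) and move r2 -> r3 (a plain chain).
  moves[0] = 1;
  moves[1] = 0;
  moves[2] = 3;
  for (int source = 0; source < kNumRegs; ++source) {
    if (moves[source] == kNoReg) continue;
    EmitChain(source, source, moves[source], moves);
  }
  // Prints: r8 <- r0, r0 <- r1, r1 <- r8, r3 <- r2.
}
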
diff --git a/deps/v8/src/maglev/maglev-code-generator.h b/deps/v8/src/maglev/maglev-code-generator.h
new file mode 100644
index 0000000000..ea584cd179
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-code-generator.h
@@ -0,0 +1,27 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
+#define V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class Graph;
+class MaglevCompilationUnit;
+
+class MaglevCodeGenerator : public AllStatic {
+ public:
+ static MaybeHandle<Code> Generate(MaglevCompilationUnit* compilation_unit,
+ Graph* graph);
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
diff --git a/deps/v8/src/maglev/maglev-compilation-info.cc b/deps/v8/src/maglev/maglev-compilation-info.cc
new file mode 100644
index 0000000000..630d341a66
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compilation-info.cc
@@ -0,0 +1,123 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-compilation-info.h"
+
+#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/handles/persistent-handles.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-compiler.h"
+#include "src/maglev/maglev-concurrent-dispatcher.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/objects/js-function-inl.h"
+#include "src/utils/identity-map.h"
+#include "src/utils/locked-queue-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+namespace {
+
+constexpr char kMaglevZoneName[] = "maglev-compilation-job-zone";
+
+class V8_NODISCARD MaglevCompilationHandleScope final {
+ public:
+ MaglevCompilationHandleScope(Isolate* isolate,
+ maglev::MaglevCompilationInfo* info)
+ : info_(info),
+ persistent_(isolate),
+ exported_info_(info),
+ canonical_(isolate, &exported_info_) {
+ info->ReopenHandlesInNewHandleScope(isolate);
+ }
+
+ ~MaglevCompilationHandleScope() {
+ info_->set_persistent_handles(persistent_.Detach());
+ }
+
+ private:
+ maglev::MaglevCompilationInfo* const info_;
+ PersistentHandlesScope persistent_;
+ ExportedMaglevCompilationInfo exported_info_;
+ CanonicalHandleScopeForMaglev canonical_;
+};
+
+} // namespace
+
+MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate,
+ Handle<JSFunction> function)
+ : zone_(isolate->allocator(), kMaglevZoneName),
+ isolate_(isolate),
+ broker_(new compiler::JSHeapBroker(
+ isolate, zone(), FLAG_trace_heap_broker, CodeKind::MAGLEV)),
+ shared_(function->shared(), isolate),
+ function_(function)
+#define V(Name) , Name##_(FLAG_##Name)
+ MAGLEV_COMPILATION_FLAG_LIST(V)
+#undef V
+{
+ DCHECK(FLAG_maglev);
+
+ MaglevCompilationHandleScope compilation(isolate, this);
+
+ compiler::CompilationDependencies* deps =
+ zone()->New<compiler::CompilationDependencies>(broker(), zone());
+ USE(deps); // The deps register themselves in the heap broker.
+
+ broker()->SetTargetNativeContextRef(
+ handle(function->native_context(), isolate));
+ broker()->InitializeAndStartSerializing();
+ broker()->StopSerializing();
+
+ toplevel_compilation_unit_ =
+ MaglevCompilationUnit::New(zone(), this, function);
+}
+
+MaglevCompilationInfo::~MaglevCompilationInfo() = default;
+
+void MaglevCompilationInfo::set_graph_labeller(
+ MaglevGraphLabeller* graph_labeller) {
+ graph_labeller_.reset(graph_labeller);
+}
+
+void MaglevCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
+ DCHECK(!shared_.is_null());
+ shared_ = handle(*shared_, isolate);
+ DCHECK(!function_.is_null());
+ function_ = handle(*function_, isolate);
+}
+
+void MaglevCompilationInfo::set_persistent_handles(
+ std::unique_ptr<PersistentHandles>&& persistent_handles) {
+ DCHECK_NULL(ph_);
+ ph_ = std::move(persistent_handles);
+ DCHECK_NOT_NULL(ph_);
+}
+
+std::unique_ptr<PersistentHandles>
+MaglevCompilationInfo::DetachPersistentHandles() {
+ DCHECK_NOT_NULL(ph_);
+ return std::move(ph_);
+}
+
+void MaglevCompilationInfo::set_canonical_handles(
+ std::unique_ptr<CanonicalHandlesMap>&& canonical_handles) {
+ DCHECK_NULL(canonical_handles_);
+ canonical_handles_ = std::move(canonical_handles);
+ DCHECK_NOT_NULL(canonical_handles_);
+}
+
+std::unique_ptr<CanonicalHandlesMap>
+MaglevCompilationInfo::DetachCanonicalHandles() {
+ DCHECK_NOT_NULL(canonical_handles_);
+ return std::move(canonical_handles_);
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-compilation-info.h b/deps/v8/src/maglev/maglev-compilation-info.h
new file mode 100644
index 0000000000..70490de218
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compilation-info.h
@@ -0,0 +1,137 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_COMPILATION_INFO_H_
+#define V8_MAGLEV_MAGLEV_COMPILATION_INFO_H_
+
+#include <memory>
+
+#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class PersistentHandles;
+class SharedFunctionInfo;
+class Zone;
+
+namespace compiler {
+class JSHeapBroker;
+}
+
+namespace maglev {
+
+class Graph;
+class MaglevCompilationUnit;
+class MaglevGraphLabeller;
+
+#define MAGLEV_COMPILATION_FLAG_LIST(V) \
+ V(code_comments) \
+ V(maglev) \
+ V(print_maglev_code) \
+ V(print_maglev_graph) \
+ V(trace_maglev_regalloc)
+
+class MaglevCompilationInfo final {
+ public:
+ static std::unique_ptr<MaglevCompilationInfo> New(
+ Isolate* isolate, Handle<JSFunction> function) {
+ // Doesn't use make_unique due to the private ctor.
+ return std::unique_ptr<MaglevCompilationInfo>(
+ new MaglevCompilationInfo(isolate, function));
+ }
+ ~MaglevCompilationInfo();
+
+ Isolate* isolate() const { return isolate_; }
+ Zone* zone() { return &zone_; }
+ compiler::JSHeapBroker* broker() const { return broker_.get(); }
+ MaglevCompilationUnit* toplevel_compilation_unit() const {
+ return toplevel_compilation_unit_;
+ }
+ Handle<JSFunction> function() const { return function_; }
+
+ bool has_graph_labeller() const { return !!graph_labeller_; }
+ void set_graph_labeller(MaglevGraphLabeller* graph_labeller);
+ MaglevGraphLabeller* graph_labeller() const {
+ DCHECK(has_graph_labeller());
+ return graph_labeller_.get();
+ }
+
+ void set_graph(Graph* graph) { graph_ = graph; }
+ Graph* graph() const { return graph_; }
+
+ void set_codet(MaybeHandle<CodeT> codet) { codet_ = codet; }
+ MaybeHandle<CodeT> codet() const { return codet_; }
+
+ // Flag accessors (for thread-safe access to global flags).
+ // TODO(v8:7700): Consider caching these.
+#define V(Name) \
+ bool Name() const { return Name##_; }
+ MAGLEV_COMPILATION_FLAG_LIST(V)
+#undef V
+
+ // Must be called from within a MaglevCompilationHandleScope. Transfers owned
+ // handles (e.g. shared_, function_) to the new scope.
+ void ReopenHandlesInNewHandleScope(Isolate* isolate);
+
+ // Persistent and canonical handles are passed back and forth between the
+ // Isolate, this info, and the LocalIsolate.
+ void set_persistent_handles(
+ std::unique_ptr<PersistentHandles>&& persistent_handles);
+ std::unique_ptr<PersistentHandles> DetachPersistentHandles();
+ void set_canonical_handles(
+ std::unique_ptr<CanonicalHandlesMap>&& canonical_handles);
+ std::unique_ptr<CanonicalHandlesMap> DetachCanonicalHandles();
+
+ private:
+ MaglevCompilationInfo(Isolate* isolate, Handle<JSFunction> function);
+
+ Zone zone_;
+ Isolate* const isolate_;
+ const std::unique_ptr<compiler::JSHeapBroker> broker_;
+ // Must be initialized late since it requires an initialized heap broker.
+ MaglevCompilationUnit* toplevel_compilation_unit_ = nullptr;
+
+ Handle<SharedFunctionInfo> shared_;
+ Handle<JSFunction> function_;
+
+ std::unique_ptr<MaglevGraphLabeller> graph_labeller_;
+
+ // Produced off-thread during ExecuteJobImpl.
+ Graph* graph_ = nullptr;
+
+ // Produced during FinalizeJobImpl.
+ MaybeHandle<CodeT> codet_;
+
+#define V(Name) const bool Name##_;
+ MAGLEV_COMPILATION_FLAG_LIST(V)
+#undef V
+
+ // 1) PersistentHandles created via PersistentHandlesScope inside of
+ // CompilationHandleScope.
+ // 2) Owned by MaglevCompilationInfo.
+ // 3) Owned by the broker's LocalHeap when entering the LocalHeapScope.
+ // 4) Back to MaglevCompilationInfo when exiting the LocalHeapScope.
+ //
+ // TODO(jgruber,v8:7700): Update this comment:
+ //
+ // In normal execution it gets destroyed when PipelineData gets destroyed.
+ // There is a special case in GenerateCodeForTesting where the JSHeapBroker
+ // will not be retired in that same method. In this case, we need to re-attach
+ // the PersistentHandles container to the JSHeapBroker.
+ std::unique_ptr<PersistentHandles> ph_;
+
+ // Canonical handles follow the same path as described by the persistent
+ // handles above. The only difference is that they are created in the
+ // CanonicalHandleScope (i.e. step 1 is different).
+ std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_COMPILATION_INFO_H_
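
MAGLEV_COMPILATION_FLAG_LIST above is an X-macro: it is expanded once for the constructor's initializer list (in maglev-compilation-info.cc), once for the accessors, and once for the bool fields, so each cached flag is declared in a single place and snapshotted for thread-safe access. A toy expansion of the same pattern, using a made-up two-entry list and plain bool globals standing in for V8's FLAG_* variables:

#define DEMO_FLAG_LIST(V) \
  V(code_comments)        \
  V(print_maglev_code)

bool FLAG_code_comments = false;  // stand-ins for the real flags
bool FLAG_print_maglev_code = true;

class DemoInfo {
 public:
  DemoInfo()
      : dummy_(0)
#define V(Name) , Name##_(FLAG_##Name)
            DEMO_FLAG_LIST(V)
#undef V
  {
  }

  // One accessor per flag, generated the same way as in the header above.
#define V(Name) \
  bool Name() const { return Name##_; }
  DEMO_FLAG_LIST(V)
#undef V

 private:
  int dummy_;  // anchors the comma-led initializers, like function_ above
#define V(Name) const bool Name##_;
  DEMO_FLAG_LIST(V)
#undef V
};
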
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.cc b/deps/v8/src/maglev/maglev-compilation-unit.cc
new file mode 100644
index 0000000000..f35f418de7
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compilation-unit.cc
@@ -0,0 +1,45 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-compilation-unit.h"
+
+#include "src/compiler/js-heap-broker.h"
+#include "src/maglev/maglev-compilation-info.h"
+#include "src/objects/js-function-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+MaglevCompilationUnit::MaglevCompilationUnit(MaglevCompilationInfo* info,
+ Handle<JSFunction> function)
+ : info_(info),
+ bytecode_(
+ MakeRef(broker(), function->shared().GetBytecodeArray(isolate()))),
+ feedback_(MakeRef(broker(), function->feedback_vector())),
+ bytecode_analysis_(bytecode_.object(), zone(), BytecodeOffset::None(),
+ true),
+ register_count_(bytecode_.register_count()),
+ parameter_count_(bytecode_.parameter_count()) {}
+
+compiler::JSHeapBroker* MaglevCompilationUnit::broker() const {
+ return info_->broker();
+}
+
+Isolate* MaglevCompilationUnit::isolate() const { return info_->isolate(); }
+
+Zone* MaglevCompilationUnit::zone() const { return info_->zone(); }
+
+bool MaglevCompilationUnit::has_graph_labeller() const {
+ return info_->has_graph_labeller();
+}
+
+MaglevGraphLabeller* MaglevCompilationUnit::graph_labeller() const {
+ DCHECK(has_graph_labeller());
+ return info_->graph_labeller();
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.h b/deps/v8/src/maglev/maglev-compilation-unit.h
new file mode 100644
index 0000000000..52e1a775d6
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compilation-unit.h
@@ -0,0 +1,57 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_COMPILATION_UNIT_H_
+#define V8_MAGLEV_MAGLEV_COMPILATION_UNIT_H_
+
+#include "src/common/globals.h"
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/heap-refs.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevCompilationInfo;
+class MaglevGraphLabeller;
+
+// Per-unit data, i.e. once per top-level function and once per inlined
+// function.
+class MaglevCompilationUnit : public ZoneObject {
+ public:
+ static MaglevCompilationUnit* New(Zone* zone, MaglevCompilationInfo* data,
+ Handle<JSFunction> function) {
+ return zone->New<MaglevCompilationUnit>(data, function);
+ }
+ MaglevCompilationUnit(MaglevCompilationInfo* data,
+ Handle<JSFunction> function);
+
+ MaglevCompilationInfo* info() const { return info_; }
+ compiler::JSHeapBroker* broker() const;
+ Isolate* isolate() const;
+ Zone* zone() const;
+ int register_count() const { return register_count_; }
+ int parameter_count() const { return parameter_count_; }
+ bool has_graph_labeller() const;
+ MaglevGraphLabeller* graph_labeller() const;
+ const compiler::BytecodeArrayRef& bytecode() const { return bytecode_; }
+ const compiler::FeedbackVectorRef& feedback() const { return feedback_; }
+ const compiler::BytecodeAnalysis& bytecode_analysis() const {
+ return bytecode_analysis_;
+ }
+
+ private:
+ MaglevCompilationInfo* const info_;
+ const compiler::BytecodeArrayRef bytecode_;
+ const compiler::FeedbackVectorRef feedback_;
+ const compiler::BytecodeAnalysis bytecode_analysis_;
+ const int register_count_;
+ const int parameter_count_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_COMPILATION_UNIT_H_
diff --git a/deps/v8/src/maglev/maglev-compiler.cc b/deps/v8/src/maglev/maglev-compiler.cc
new file mode 100644
index 0000000000..f4a23d869e
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compiler.cc
@@ -0,0 +1,209 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-compiler.h"
+
+#include <iomanip>
+#include <ostream>
+#include <type_traits>
+
+#include "src/base/iterator.h"
+#include "src/base/logging.h"
+#include "src/base/threaded-list.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+#include "src/common/globals.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/heap-refs.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/execution/frames.h"
+#include "src/ic/handler-configuration.h"
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-code-generator.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-graph-builder.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-printer.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc.h"
+#include "src/maglev/maglev-vreg-allocator.h"
+#include "src/objects/code-inl.h"
+#include "src/objects/js-function.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class NumberingProcessor {
+ public:
+ static constexpr bool kNeedsCheckpointStates = false;
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) { node_id_ = 1; }
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+
+ void Process(NodeBase* node, const ProcessingState& state) {
+ node->set_id(node_id_++);
+ }
+
+ private:
+ uint32_t node_id_;
+};
+
+class UseMarkingProcessor {
+ public:
+ static constexpr bool kNeedsCheckpointStates = true;
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+
+ void Process(NodeBase* node, const ProcessingState& state) {
+ if (node->properties().can_deopt()) MarkCheckpointNodes(node, state);
+ for (Input& input : *node) {
+ input.node()->mark_use(node->id(), &input);
+ }
+ }
+
+ void Process(Phi* node, const ProcessingState& state) {
+ // Don't mark Phi uses when visiting the node, because of loop phis.
+ // Instead, they'll be visited while processing Jump/JumpLoop.
+ }
+
+ // Specialize the two unconditional jumps to extend their Phis' inputs' live
+ // ranges.
+
+ void Process(JumpLoop* node, const ProcessingState& state) {
+ int i = state.block()->predecessor_id();
+ BasicBlock* target = node->target();
+ if (!target->has_phi()) return;
+ uint32_t use = node->id();
+ for (Phi* phi : *target->phis()) {
+ ValueNode* input = phi->input(i).node();
+ input->mark_use(use, &phi->input(i));
+ }
+ }
+ void Process(Jump* node, const ProcessingState& state) {
+ int i = state.block()->predecessor_id();
+ BasicBlock* target = node->target();
+ if (!target->has_phi()) return;
+ uint32_t use = node->id();
+ for (Phi* phi : *target->phis()) {
+ ValueNode* input = phi->input(i).node();
+ input->mark_use(use, &phi->input(i));
+ }
+ }
+
+ private:
+ void MarkCheckpointNodes(NodeBase* node, const ProcessingState& state) {
+ const InterpreterFrameState* checkpoint_state =
+ state.checkpoint_frame_state();
+ int use_id = node->id();
+
+ for (int i = 0; i < state.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ ValueNode* node = checkpoint_state->get(reg);
+ if (node) node->mark_use(use_id, nullptr);
+ }
+ for (int i = 0; i < state.register_count(); i++) {
+ interpreter::Register reg = interpreter::Register(i);
+ ValueNode* node = checkpoint_state->get(reg);
+ if (node) node->mark_use(use_id, nullptr);
+ }
+ if (checkpoint_state->accumulator()) {
+ checkpoint_state->accumulator()->mark_use(use_id, nullptr);
+ }
+ }
+};
+
+// static
+void MaglevCompiler::Compile(MaglevCompilationUnit* toplevel_compilation_unit) {
+ MaglevCompiler compiler(toplevel_compilation_unit);
+ compiler.Compile();
+}
+
+void MaglevCompiler::Compile() {
+ compiler::UnparkedScopeIfNeeded unparked_scope(broker());
+
+ // Build graph.
+ if (FLAG_print_maglev_code || FLAG_code_comments || FLAG_print_maglev_graph ||
+ FLAG_trace_maglev_regalloc) {
+ toplevel_compilation_unit_->info()->set_graph_labeller(
+ new MaglevGraphLabeller());
+ }
+
+ MaglevGraphBuilder graph_builder(toplevel_compilation_unit_);
+
+ graph_builder.Build();
+
+ // TODO(v8:7700): Clean up after all bytecodes are supported.
+ if (graph_builder.found_unsupported_bytecode()) {
+ return;
+ }
+
+ if (FLAG_print_maglev_graph) {
+ std::cout << "After graph building" << std::endl;
+ PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
+ }
+
+ {
+ GraphMultiProcessor<NumberingProcessor, UseMarkingProcessor,
+ MaglevVregAllocator>
+ processor(toplevel_compilation_unit_);
+ processor.ProcessGraph(graph_builder.graph());
+ }
+
+ if (FLAG_print_maglev_graph) {
+ std::cout << "After node processor" << std::endl;
+ PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
+ }
+
+ StraightForwardRegisterAllocator allocator(toplevel_compilation_unit_,
+ graph_builder.graph());
+
+ if (FLAG_print_maglev_graph) {
+ std::cout << "After register allocation" << std::endl;
+ PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
+ }
+
+ // Stash the compiled graph on the compilation info.
+ toplevel_compilation_unit_->info()->set_graph(graph_builder.graph());
+}
+
+// static
+MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
+ MaglevCompilationUnit* toplevel_compilation_unit) {
+ Graph* const graph = toplevel_compilation_unit->info()->graph();
+ if (graph == nullptr) return {}; // Compilation failed.
+
+ Handle<Code> code;
+ if (!MaglevCodeGenerator::Generate(toplevel_compilation_unit, graph)
+ .ToHandle(&code)) {
+ return {};
+ }
+
+ compiler::JSHeapBroker* const broker = toplevel_compilation_unit->broker();
+ const bool deps_committed_successfully = broker->dependencies()->Commit(code);
+ CHECK(deps_committed_successfully);
+
+ if (FLAG_print_maglev_code) {
+ code->Print();
+ }
+
+ Isolate* const isolate = toplevel_compilation_unit->isolate();
+ return ToCodeT(code, isolate);
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
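
NumberingProcessor and UseMarkingProcessor above follow the hook contract driven by GraphProcessor/GraphMultiProcessor: PreProcessGraph, PreProcessBasicBlock, Process per node (with a ProcessingState in the real code), then PostProcessGraph. A toy stand-in for that driver and for the numbering pass, using plain vectors instead of Maglev's graph types (illustrative only):

#include <cstdint>
#include <vector>

struct Node { uint32_t id = 0; };
struct Block { std::vector<Node*> nodes; };
struct Graph { std::vector<Block*> blocks; };

template <typename Processor>
void RunOverGraph(Graph* graph, Processor& processor) {
  processor.PreProcessGraph(graph);
  for (Block* block : graph->blocks) {
    processor.PreProcessBasicBlock(block);
    for (Node* node : block->nodes) processor.Process(node);
  }
  processor.PostProcessGraph(graph);
}

// Mirrors NumberingProcessor: hand out consecutive node ids starting at 1.
class ToyNumberingProcessor {
 public:
  void PreProcessGraph(Graph*) { next_id_ = 1; }
  void PostProcessGraph(Graph*) {}
  void PreProcessBasicBlock(Block*) {}
  void Process(Node* node) { node->id = next_id_++; }

 private:
  uint32_t next_id_ = 1;
};
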
diff --git a/deps/v8/src/maglev/maglev-compiler.h b/deps/v8/src/maglev/maglev-compiler.h
new file mode 100644
index 0000000000..79b71552d1
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-compiler.h
@@ -0,0 +1,53 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_COMPILER_H_
+#define V8_MAGLEV_MAGLEV_COMPILER_H_
+
+#include "src/common/globals.h"
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/heap-refs.h"
+#include "src/maglev/maglev-compilation-unit.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class JSHeapBroker;
+}
+
+namespace maglev {
+
+class Graph;
+
+class MaglevCompiler {
+ public:
+ // May be called from any thread.
+ static void Compile(MaglevCompilationUnit* toplevel_compilation_unit);
+
+ // Called on the main thread after Compile has completed.
+ // TODO(v8:7700): Move this to a different class?
+ static MaybeHandle<CodeT> GenerateCode(
+ MaglevCompilationUnit* toplevel_compilation_unit);
+
+ private:
+ explicit MaglevCompiler(MaglevCompilationUnit* toplevel_compilation_unit)
+ : toplevel_compilation_unit_(toplevel_compilation_unit) {}
+
+ void Compile();
+
+ compiler::JSHeapBroker* broker() const {
+ return toplevel_compilation_unit_->broker();
+ }
+ Zone* zone() { return toplevel_compilation_unit_->zone(); }
+ Isolate* isolate() { return toplevel_compilation_unit_->isolate(); }
+
+ MaglevCompilationUnit* const toplevel_compilation_unit_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_COMPILER_H_
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
new file mode 100644
index 0000000000..762de2455a
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
@@ -0,0 +1,194 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-concurrent-dispatcher.h"
+
+#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/handles/persistent-handles.h"
+#include "src/maglev/maglev-compilation-info.h"
+#include "src/maglev/maglev-compiler.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/objects/js-function-inl.h"
+#include "src/utils/identity-map.h"
+#include "src/utils/locked-queue-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+void JSHeapBroker::AttachLocalIsolateForMaglev(
+ maglev::MaglevCompilationInfo* info, LocalIsolate* local_isolate) {
+ set_canonical_handles(info->DetachCanonicalHandles());
+ DCHECK_NULL(local_isolate_);
+ local_isolate_ = local_isolate;
+ DCHECK_NOT_NULL(local_isolate_);
+ local_isolate_->heap()->AttachPersistentHandles(
+ info->DetachPersistentHandles());
+}
+
+void JSHeapBroker::DetachLocalIsolateForMaglev(
+ maglev::MaglevCompilationInfo* info) {
+ DCHECK_NULL(ph_);
+ DCHECK_NOT_NULL(local_isolate_);
+ std::unique_ptr<PersistentHandles> ph =
+ local_isolate_->heap()->DetachPersistentHandles();
+ local_isolate_ = nullptr;
+ info->set_canonical_handles(DetachCanonicalHandles());
+ info->set_persistent_handles(std::move(ph));
+}
+
+} // namespace compiler
+
+namespace maglev {
+
+namespace {
+
+constexpr char kMaglevCompilerName[] = "Maglev";
+
+// LocalIsolateScope encapsulates the phase where persistent handles are
+// attached to the LocalHeap inside {local_isolate}.
+class V8_NODISCARD LocalIsolateScope final {
+ public:
+ explicit LocalIsolateScope(MaglevCompilationInfo* info,
+ LocalIsolate* local_isolate)
+ : info_(info) {
+ info_->broker()->AttachLocalIsolateForMaglev(info_, local_isolate);
+ }
+
+ ~LocalIsolateScope() { info_->broker()->DetachLocalIsolateForMaglev(info_); }
+
+ private:
+ MaglevCompilationInfo* const info_;
+};
+
+} // namespace
+
+Zone* ExportedMaglevCompilationInfo::zone() const { return info_->zone(); }
+
+void ExportedMaglevCompilationInfo::set_canonical_handles(
+ std::unique_ptr<CanonicalHandlesMap>&& canonical_handles) {
+ info_->set_canonical_handles(std::move(canonical_handles));
+}
+
+// static
+std::unique_ptr<MaglevCompilationJob> MaglevCompilationJob::New(
+ Isolate* isolate, Handle<JSFunction> function) {
+ auto info = maglev::MaglevCompilationInfo::New(isolate, function);
+ return std::unique_ptr<MaglevCompilationJob>(
+ new MaglevCompilationJob(std::move(info)));
+}
+
+MaglevCompilationJob::MaglevCompilationJob(
+ std::unique_ptr<MaglevCompilationInfo>&& info)
+ : OptimizedCompilationJob(nullptr, kMaglevCompilerName),
+ info_(std::move(info)) {
+ // TODO(jgruber, v8:7700): Remove the OptimizedCompilationInfo (which should
+ // be renamed to TurbofanCompilationInfo) from OptimizedCompilationJob.
+ DCHECK(FLAG_maglev);
+}
+
+MaglevCompilationJob::~MaglevCompilationJob() = default;
+
+CompilationJob::Status MaglevCompilationJob::PrepareJobImpl(Isolate* isolate) {
+ // TODO(v8:7700): Actual return codes.
+ return CompilationJob::SUCCEEDED;
+}
+
+CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
+ RuntimeCallStats* stats, LocalIsolate* local_isolate) {
+ LocalIsolateScope scope{info(), local_isolate};
+ maglev::MaglevCompiler::Compile(info()->toplevel_compilation_unit());
+ // TODO(v8:7700): Actual return codes.
+ return CompilationJob::SUCCEEDED;
+}
+
+CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
+ info()->set_codet(maglev::MaglevCompiler::GenerateCode(
+ info()->toplevel_compilation_unit()));
+ // TODO(v8:7700): Actual return codes.
+ return CompilationJob::SUCCEEDED;
+}
+
+// The JobTask is posted to V8::GetCurrentPlatform(). It's responsible for
+// processing the incoming queue on a worker thread.
+class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
+ public:
+ explicit JobTask(MaglevConcurrentDispatcher* dispatcher)
+ : dispatcher_(dispatcher) {}
+
+ void Run(JobDelegate* delegate) override {
+ LocalIsolate local_isolate(isolate(), ThreadKind::kBackground);
+ DCHECK(local_isolate.heap()->IsParked());
+
+ while (!incoming_queue()->IsEmpty() && !delegate->ShouldYield()) {
+ std::unique_ptr<MaglevCompilationJob> job;
+ if (!incoming_queue()->Dequeue(&job)) break;
+ DCHECK_NOT_NULL(job);
+ RuntimeCallStats* rcs = nullptr; // TODO(v8:7700): Implement.
+ CompilationJob::Status status = job->ExecuteJob(rcs, &local_isolate);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ outgoing_queue()->Enqueue(std::move(job));
+ }
+ // TODO(v8:7700):
+ // isolate_->stack_guard()->RequestInstallMaglevCode();
+ }
+
+ size_t GetMaxConcurrency(size_t) const override {
+ return incoming_queue()->size();
+ }
+
+ private:
+ Isolate* isolate() const { return dispatcher_->isolate_; }
+ QueueT* incoming_queue() const { return &dispatcher_->incoming_queue_; }
+ QueueT* outgoing_queue() const { return &dispatcher_->outgoing_queue_; }
+
+ MaglevConcurrentDispatcher* const dispatcher_;
+ const Handle<JSFunction> function_;
+};
+
+MaglevConcurrentDispatcher::MaglevConcurrentDispatcher(Isolate* isolate)
+ : isolate_(isolate) {
+ if (FLAG_concurrent_recompilation && FLAG_maglev) {
+ job_handle_ = V8::GetCurrentPlatform()->PostJob(
+ TaskPriority::kUserVisible, std::make_unique<JobTask>(this));
+ DCHECK(is_enabled());
+ } else {
+ DCHECK(!is_enabled());
+ }
+}
+
+MaglevConcurrentDispatcher::~MaglevConcurrentDispatcher() {
+ if (is_enabled() && job_handle_->IsValid()) {
+ // Wait for the job handle to complete, so that we know the queue
+ // pointers are safe.
+ job_handle_->Cancel();
+ }
+}
+
+void MaglevConcurrentDispatcher::EnqueueJob(
+ std::unique_ptr<MaglevCompilationJob>&& job) {
+ DCHECK(is_enabled());
+ // TODO(v8:7700): RCS.
+ // RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileMaglev);
+ incoming_queue_.Enqueue(std::move(job));
+ job_handle_->NotifyConcurrencyIncrease();
+}
+
+void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
+ while (!outgoing_queue_.IsEmpty()) {
+ std::unique_ptr<MaglevCompilationJob> job;
+ outgoing_queue_.Dequeue(&job);
+ CompilationJob::Status status = job->FinalizeJob(isolate_);
+ // TODO(v8:7700): Use the result.
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ }
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
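
The dispatcher above hands jobs between threads through two locked queues: the main thread enqueues to incoming_queue_, the platform job drains it and parks executed jobs on outgoing_queue_, and the main thread later finalizes them. A minimal stand-alone analogue of that flow, with std::mutex/std::queue and std::thread standing in for V8's LockedQueue and the platform job API, and a placeholder Job type:

#include <cstdio>
#include <mutex>
#include <queue>
#include <string>
#include <thread>

struct Job {
  std::string name;
  bool executed = false;
};

template <typename T>
class SimpleLockedQueue {  // simplified analogue of src/utils/locked-queue.h
 public:
  void Enqueue(T value) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push(std::move(value));
  }
  bool Dequeue(T* out) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (queue_.empty()) return false;
    *out = std::move(queue_.front());
    queue_.pop();
    return true;
  }

 private:
  std::mutex mutex_;
  std::queue<T> queue_;
};

int main() {
  SimpleLockedQueue<Job> incoming, outgoing;

  // Main thread: EnqueueJob().
  incoming.Enqueue(Job{"f"});
  incoming.Enqueue(Job{"g"});

  // Background worker: roughly what JobTask::Run() does with ExecuteJob().
  std::thread worker([&] {
    Job job;
    while (incoming.Dequeue(&job)) {
      job.executed = true;  // "compile"
      outgoing.Enqueue(std::move(job));
    }
  });
  worker.join();

  // Main thread: FinalizeFinishedJobs().
  Job job;
  while (outgoing.Dequeue(&job)) {
    std::printf("finalized %s (executed=%d)\n", job.name.c_str(), job.executed);
  }
}
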
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
new file mode 100644
index 0000000000..0b2a086e5a
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
@@ -0,0 +1,92 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_CONCURRENT_DISPATCHER_H_
+#define V8_MAGLEV_MAGLEV_CONCURRENT_DISPATCHER_H_
+
+#ifdef V8_ENABLE_MAGLEV
+
+#include <memory>
+
+#include "src/codegen/compiler.h" // For OptimizedCompilationJob.
+#include "src/utils/locked-queue.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+namespace maglev {
+
+class MaglevCompilationInfo;
+
+// Exports needed functionality without exposing implementation details.
+class ExportedMaglevCompilationInfo final {
+ public:
+ explicit ExportedMaglevCompilationInfo(MaglevCompilationInfo* info)
+ : info_(info) {}
+
+ Zone* zone() const;
+ void set_canonical_handles(
+ std::unique_ptr<CanonicalHandlesMap>&& canonical_handles);
+
+ private:
+ MaglevCompilationInfo* const info_;
+};
+
+// The job is a single actual compilation task.
+class MaglevCompilationJob final : public OptimizedCompilationJob {
+ public:
+ static std::unique_ptr<MaglevCompilationJob> New(Isolate* isolate,
+ Handle<JSFunction> function);
+ virtual ~MaglevCompilationJob();
+
+ Status PrepareJobImpl(Isolate* isolate) override;
+ Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) override;
+ Status FinalizeJobImpl(Isolate* isolate) override;
+
+ private:
+ explicit MaglevCompilationJob(std::unique_ptr<MaglevCompilationInfo>&& info);
+
+ MaglevCompilationInfo* info() const { return info_.get(); }
+
+ const std::unique_ptr<MaglevCompilationInfo> info_;
+};
+
+// The public API for Maglev concurrent compilation.
+// Keep this as minimal as possible.
+class MaglevConcurrentDispatcher final {
+ class JobTask;
+
+ // TODO(jgruber): There's no reason to use locking queues here, we only use
+ // them for simplicity - consider replacing with lock-free data structures.
+ using QueueT = LockedQueue<std::unique_ptr<MaglevCompilationJob>>;
+
+ public:
+ explicit MaglevConcurrentDispatcher(Isolate* isolate);
+ ~MaglevConcurrentDispatcher();
+
+ // Called from the main thread.
+ void EnqueueJob(std::unique_ptr<MaglevCompilationJob>&& job);
+
+ // Called from the main thread.
+ void FinalizeFinishedJobs();
+
+ bool is_enabled() const { return static_cast<bool>(job_handle_); }
+
+ private:
+ Isolate* const isolate_;
+ std::unique_ptr<JobHandle> job_handle_;
+ QueueT incoming_queue_;
+ QueueT outgoing_queue_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ENABLE_MAGLEV
+
+#endif // V8_MAGLEV_MAGLEV_CONCURRENT_DISPATCHER_H_
diff --git a/deps/v8/src/maglev/maglev-graph-builder.cc b/deps/v8/src/maglev/maglev-graph-builder.cc
new file mode 100644
index 0000000000..b38bece1d5
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-builder.cc
@@ -0,0 +1,616 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-graph-builder.h"
+
+#include "src/compiler/feedback-source.h"
+#include "src/compiler/heap-refs.h"
+#include "src/handles/maybe-handles-inl.h"
+#include "src/ic/handler-configuration.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/name-inl.h"
+#include "src/objects/slots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace maglev {
+
+MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
+ : compilation_unit_(compilation_unit),
+ iterator_(bytecode().object()),
+ jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length())),
+ // Overallocate merge_states_ by one to allow always looking up the
+ // next offset.
+ merge_states_(zone()->NewArray<MergePointInterpreterFrameState*>(
+ bytecode().length() + 1)),
+ graph_(Graph::New(zone())),
+ current_interpreter_frame_(*compilation_unit_) {
+ memset(merge_states_, 0,
+ bytecode().length() * sizeof(InterpreterFrameState*));
+ // Default construct basic block refs.
+ // TODO(leszeks): This could be a memset of nullptr to ..._jump_targets_.
+ for (int i = 0; i < bytecode().length(); ++i) {
+ new (&jump_targets_[i]) BasicBlockRef();
+ }
+
+ CalculatePredecessorCounts();
+
+ for (auto& offset_and_info : bytecode_analysis().GetLoopInfos()) {
+ int offset = offset_and_info.first;
+ const compiler::LoopInfo& loop_info = offset_and_info.second;
+
+ const compiler::BytecodeLivenessState* liveness =
+ bytecode_analysis().GetInLivenessFor(offset);
+
+ merge_states_[offset] = zone()->New<MergePointInterpreterFrameState>(
+ *compilation_unit_, offset, NumPredecessors(offset), liveness,
+ &loop_info);
+ }
+
+ current_block_ = zone()->New<BasicBlock>(nullptr);
+ block_offset_ = -1;
+
+ for (int i = 0; i < parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
+ }
+
+ // TODO(leszeks): Extract out separate "incoming context/closure" nodes,
+ // to be able to read in the machine register but also use the frame-spilled
+ // slot.
+ interpreter::Register regs[] = {interpreter::Register::current_context(),
+ interpreter::Register::function_closure()};
+ for (interpreter::Register& reg : regs) {
+ current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
+ }
+
+ interpreter::Register new_target_or_generator_register =
+ bytecode().incoming_new_target_or_generator_register();
+
+ const compiler::BytecodeLivenessState* liveness =
+ bytecode_analysis().GetInLivenessFor(0);
+ int register_index = 0;
+ // TODO(leszeks): Don't emit if not needed.
+ ValueNode* undefined_value =
+ AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue);
+ if (new_target_or_generator_register.is_valid()) {
+ int new_target_index = new_target_or_generator_register.index();
+ for (; register_index < new_target_index; register_index++) {
+ StoreRegister(interpreter::Register(register_index), undefined_value,
+ liveness);
+ }
+ StoreRegister(
+ new_target_or_generator_register,
+ // TODO(leszeks): Expose in Graph.
+ AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister),
+ liveness);
+ register_index++;
+ }
+ for (; register_index < register_count(); register_index++) {
+ StoreRegister(interpreter::Register(register_index), undefined_value,
+ liveness);
+ }
+
+ BasicBlock* first_block = CreateBlock<Jump>({}, &jump_targets_[0]);
+ MergeIntoFrameState(first_block, 0);
+}
+
+// TODO(v8:7700): Clean up after all bytecodes are supported.
+#define MAGLEV_UNIMPLEMENTED(BytecodeName) \
+ do { \
+ std::cerr << "Maglev: Can't compile, bytecode " #BytecodeName \
+ " is not supported\n"; \
+ found_unsupported_bytecode_ = true; \
+ this_field_will_be_unused_once_all_bytecodes_are_supported_ = true; \
+ } while (false)
+
+#define MAGLEV_UNIMPLEMENTED_BYTECODE(Name) \
+ void MaglevGraphBuilder::Visit##Name() { MAGLEV_UNIMPLEMENTED(Name); }
+
+template <Operation kOperation, typename... Args>
+ValueNode* MaglevGraphBuilder::AddNewOperationNode(
+ std::initializer_list<ValueNode*> inputs, Args&&... args) {
+ switch (kOperation) {
+#define CASE(Name) \
+ case Operation::k##Name: \
+ return AddNewNode<Generic##Name>(inputs, std::forward<Args>(args)...);
+ OPERATION_LIST(CASE)
+#undef CASE
+ }
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::BuildGenericUnaryOperationNode() {
+ FeedbackSlot slot_index = GetSlotOperand(0);
+ ValueNode* value = GetAccumulator();
+ ValueNode* node = AddNewOperationNode<kOperation>(
+ {value}, compiler::FeedbackSource{feedback(), slot_index});
+ SetAccumulator(node);
+ MarkPossibleSideEffect();
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::BuildGenericBinaryOperationNode() {
+ ValueNode* left = LoadRegister(0);
+ FeedbackSlot slot_index = GetSlotOperand(1);
+ ValueNode* right = GetAccumulator();
+ ValueNode* node = AddNewOperationNode<kOperation>(
+ {left, right}, compiler::FeedbackSource{feedback(), slot_index});
+ SetAccumulator(node);
+ MarkPossibleSideEffect();
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::VisitUnaryOperation() {
+ // TODO(victorgomes): Use feedback info and create optimized versions.
+ BuildGenericUnaryOperationNode<kOperation>();
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::VisitBinaryOperation() {
+ // TODO(victorgomes): Use feedback info and create optimized versions.
+ BuildGenericBinaryOperationNode<kOperation>();
+}
+
+void MaglevGraphBuilder::VisitLdar() { SetAccumulator(LoadRegister(0)); }
+
+void MaglevGraphBuilder::VisitLdaZero() {
+ SetAccumulator(AddNewNode<SmiConstant>({}, Smi::zero()));
+}
+void MaglevGraphBuilder::VisitLdaSmi() {
+ Smi constant = Smi::FromInt(iterator_.GetImmediateOperand(0));
+ SetAccumulator(AddNewNode<SmiConstant>({}, constant));
+}
+void MaglevGraphBuilder::VisitLdaUndefined() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue));
+}
+void MaglevGraphBuilder::VisitLdaNull() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kNullValue));
+}
+void MaglevGraphBuilder::VisitLdaTheHole() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kTheHoleValue));
+}
+void MaglevGraphBuilder::VisitLdaTrue() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kTrueValue));
+}
+void MaglevGraphBuilder::VisitLdaFalse() {
+ SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kFalseValue));
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaConstant)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaImmutableContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaCurrentContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaImmutableCurrentContextSlot)
+void MaglevGraphBuilder::VisitStar() {
+ StoreRegister(
+ iterator_.GetRegisterOperand(0), GetAccumulator(),
+ bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
+}
+void MaglevGraphBuilder::VisitMov() {
+ StoreRegister(
+ iterator_.GetRegisterOperand(1), LoadRegister(0),
+ bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(PushContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(PopContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestReferenceEqual)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestUndetectable)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestNull)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestUndefined)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestTypeOf)
+void MaglevGraphBuilder::VisitLdaGlobal() {
+ // LdaGlobal <name_index> <slot>
+
+ static const int kNameOperandIndex = 0;
+ static const int kSlotOperandIndex = 1;
+
+ compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex);
+ FeedbackSlot slot_index = GetSlotOperand(kSlotOperandIndex);
+ ValueNode* context = GetContext();
+
+ USE(slot_index); // TODO(v8:7700): Use the feedback info.
+
+ SetAccumulator(AddNewNode<LoadGlobal>({context}, name));
+ MarkPossibleSideEffect();
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaGlobalInsideTypeof)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaGlobal)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaCurrentContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupContextSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupGlobalSlot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupSlotInsideTypeof)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupContextSlotInsideTypeof)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupGlobalSlotInsideTypeof)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaLookupSlot)
+void MaglevGraphBuilder::VisitGetNamedProperty() {
+ // GetNamedProperty <object> <name_index> <slot>
+ ValueNode* object = LoadRegister(0);
+ FeedbackNexus nexus = feedback_nexus(2);
+
+ if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
+ EnsureCheckpoint();
+ AddNewNode<SoftDeopt>({});
+ } else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
+ std::vector<MapAndHandler> maps_and_handlers;
+ nexus.ExtractMapsAndHandlers(&maps_and_handlers);
+ DCHECK_EQ(maps_and_handlers.size(), 1);
+ MapAndHandler& map_and_handler = maps_and_handlers[0];
+ if (map_and_handler.second->IsSmi()) {
+ int handler = map_and_handler.second->ToSmi().value();
+ LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler);
+ if (kind == LoadHandler::Kind::kField &&
+ !LoadHandler::IsWasmStructBits::decode(handler)) {
+ EnsureCheckpoint();
+ AddNewNode<CheckMaps>({object},
+ MakeRef(broker(), map_and_handler.first));
+ SetAccumulator(AddNewNode<LoadField>({object}, handler));
+ return;
+ }
+ }
+ }
+
+ ValueNode* context = GetContext();
+ compiler::NameRef name = GetRefOperand<Name>(1);
+ SetAccumulator(AddNewNode<LoadNamedGeneric>({context, object}, name));
+ MarkPossibleSideEffect();
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetNamedPropertyFromSuper)
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetKeyedProperty)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LdaModuleVariable)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaModuleVariable)
+
+void MaglevGraphBuilder::VisitSetNamedProperty() {
+ // SetNamedProperty <object> <name_index> <slot>
+ ValueNode* object = LoadRegister(0);
+ FeedbackNexus nexus = feedback_nexus(2);
+
+ if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
+ EnsureCheckpoint();
+ AddNewNode<SoftDeopt>({});
+ } else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
+ std::vector<MapAndHandler> maps_and_handlers;
+ nexus.ExtractMapsAndHandlers(&maps_and_handlers);
+ DCHECK_EQ(maps_and_handlers.size(), 1);
+ MapAndHandler& map_and_handler = maps_and_handlers[0];
+ if (map_and_handler.second->IsSmi()) {
+ int handler = map_and_handler.second->ToSmi().value();
+ StoreHandler::Kind kind = StoreHandler::KindBits::decode(handler);
+ if (kind == StoreHandler::Kind::kField) {
+ EnsureCheckpoint();
+ AddNewNode<CheckMaps>({object},
+ MakeRef(broker(), map_and_handler.first));
+ ValueNode* value = GetAccumulator();
+ AddNewNode<StoreField>({object, value}, handler);
+ return;
+ }
+ }
+ }
+
+ // TODO(victorgomes): Generic store.
+ MAGLEV_UNIMPLEMENTED(VisitSetNamedProperty);
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(DefineNamedOwnProperty)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SetKeyedProperty)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnProperty)
+MAGLEV_UNIMPLEMENTED_BYTECODE(StaInArrayLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnPropertyInLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CollectTypeProfile)
+
+void MaglevGraphBuilder::VisitAdd() { VisitBinaryOperation<Operation::kAdd>(); }
+void MaglevGraphBuilder::VisitSub() {
+ VisitBinaryOperation<Operation::kSubtract>();
+}
+void MaglevGraphBuilder::VisitMul() {
+ VisitBinaryOperation<Operation::kMultiply>();
+}
+void MaglevGraphBuilder::VisitDiv() {
+ VisitBinaryOperation<Operation::kDivide>();
+}
+void MaglevGraphBuilder::VisitMod() {
+ VisitBinaryOperation<Operation::kModulus>();
+}
+void MaglevGraphBuilder::VisitExp() {
+ VisitBinaryOperation<Operation::kExponentiate>();
+}
+void MaglevGraphBuilder::VisitBitwiseOr() {
+ VisitBinaryOperation<Operation::kBitwiseOr>();
+}
+void MaglevGraphBuilder::VisitBitwiseXor() {
+ VisitBinaryOperation<Operation::kBitwiseXor>();
+}
+void MaglevGraphBuilder::VisitBitwiseAnd() {
+ VisitBinaryOperation<Operation::kBitwiseAnd>();
+}
+void MaglevGraphBuilder::VisitShiftLeft() {
+ VisitBinaryOperation<Operation::kShiftLeft>();
+}
+void MaglevGraphBuilder::VisitShiftRight() {
+ VisitBinaryOperation<Operation::kShiftRight>();
+}
+void MaglevGraphBuilder::VisitShiftRightLogical() {
+ VisitBinaryOperation<Operation::kShiftRightLogical>();
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(AddSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SubSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(MulSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DivSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ModSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ExpSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseOrSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseXorSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseAndSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftLeftSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightSmi)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightLogicalSmi)
+
+void MaglevGraphBuilder::VisitInc() {
+ VisitUnaryOperation<Operation::kIncrement>();
+}
+void MaglevGraphBuilder::VisitDec() {
+ VisitUnaryOperation<Operation::kDecrement>();
+}
+void MaglevGraphBuilder::VisitNegate() {
+ VisitUnaryOperation<Operation::kNegate>();
+}
+void MaglevGraphBuilder::VisitBitwiseNot() {
+ VisitUnaryOperation<Operation::kBitwiseNot>();
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToBooleanLogicalNot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(LogicalNot)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TypeOf)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DeletePropertyStrict)
+MAGLEV_UNIMPLEMENTED_BYTECODE(DeletePropertySloppy)
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetSuperConstructor)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallAnyReceiver)
+
+// TODO(leszeks): For all of these:
+// a) Read feedback and implement inlining
+// b) Wrap in a helper.
+void MaglevGraphBuilder::VisitCallProperty() {
+ ValueNode* function = LoadRegister(0);
+
+ interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
+ ValueNode* context = GetContext();
+
+ static constexpr int kTheContext = 1;
+ CallProperty* call_property = AddNewNode<CallProperty>(
+ args.register_count() + kTheContext, function, context);
+ // TODO(leszeks): Move this for loop into the CallProperty constructor and
+ // pre-size the args array.
+ for (int i = 0; i < args.register_count(); ++i) {
+ call_property->set_arg(i, current_interpreter_frame_.get(args[i]));
+ }
+ SetAccumulator(call_property);
+ MarkPossibleSideEffect();
+}
+void MaglevGraphBuilder::VisitCallProperty0() {
+ ValueNode* function = LoadRegister(0);
+ ValueNode* context = GetContext();
+
+ CallProperty* call_property =
+ AddNewNode<CallProperty>({function, context, LoadRegister(1)});
+ SetAccumulator(call_property);
+ MarkPossibleSideEffect();
+}
+void MaglevGraphBuilder::VisitCallProperty1() {
+ ValueNode* function = LoadRegister(0);
+ ValueNode* context = GetContext();
+
+ CallProperty* call_property = AddNewNode<CallProperty>(
+ {function, context, LoadRegister(1), LoadRegister(2)});
+ SetAccumulator(call_property);
+ MarkPossibleSideEffect();
+}
+void MaglevGraphBuilder::VisitCallProperty2() {
+ ValueNode* function = LoadRegister(0);
+ ValueNode* context = GetContext();
+
+ CallProperty* call_property = AddNewNode<CallProperty>(
+ {function, context, LoadRegister(1), LoadRegister(2), LoadRegister(3)});
+ SetAccumulator(call_property);
+ MarkPossibleSideEffect();
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver0)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver1)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver2)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallWithSpread)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallRuntime)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallRuntimeForPair)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CallJSRuntime)
+MAGLEV_UNIMPLEMENTED_BYTECODE(InvokeIntrinsic)
+MAGLEV_UNIMPLEMENTED_BYTECODE(Construct)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ConstructWithSpread)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqual)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqualStrict)
+
+void MaglevGraphBuilder::VisitTestLessThan() {
+ VisitBinaryOperation<Operation::kLessThan>();
+}
+void MaglevGraphBuilder::VisitTestLessThanOrEqual() {
+ VisitBinaryOperation<Operation::kLessThanOrEqual>();
+}
+void MaglevGraphBuilder::VisitTestGreaterThan() {
+ VisitBinaryOperation<Operation::kGreaterThan>();
+}
+void MaglevGraphBuilder::VisitTestGreaterThanOrEqual() {
+ VisitBinaryOperation<Operation::kGreaterThanOrEqual>();
+}
+
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestInstanceOf)
+MAGLEV_UNIMPLEMENTED_BYTECODE(TestIn)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToName)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToNumber)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToNumeric)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToObject)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ToString)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateRegExpLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateArrayLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateArrayFromIterable)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateEmptyArrayLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateObjectLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateEmptyObjectLiteral)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CloneObject)
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetTemplateObject)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateClosure)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateBlockContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateCatchContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateFunctionContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateEvalContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateWithContext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateMappedArguments)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateUnmappedArguments)
+MAGLEV_UNIMPLEMENTED_BYTECODE(CreateRestParameter)
+
+void MaglevGraphBuilder::VisitJumpLoop() {
+ int target = iterator_.GetJumpTargetOffset();
+ BasicBlock* block =
+ target == iterator_.current_offset()
+ ? FinishBlock<JumpLoop>(next_offset(), {}, &jump_targets_[target])
+ : FinishBlock<JumpLoop>(next_offset(), {},
+ jump_targets_[target].block_ptr());
+
+ merge_states_[target]->MergeLoop(*compilation_unit_,
+ current_interpreter_frame_, block, target);
+ block->set_predecessor_id(0);
+}
+void MaglevGraphBuilder::VisitJump() {
+ BasicBlock* block = FinishBlock<Jump>(
+ next_offset(), {}, &jump_targets_[iterator_.GetJumpTargetOffset()]);
+ MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+ DCHECK_LT(next_offset(), bytecode().length());
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpConstant)
+void MaglevGraphBuilder::VisitJumpIfNullConstant() { VisitJumpIfNull(); }
+void MaglevGraphBuilder::VisitJumpIfNotNullConstant() { VisitJumpIfNotNull(); }
+void MaglevGraphBuilder::VisitJumpIfUndefinedConstant() {
+ VisitJumpIfUndefined();
+}
+void MaglevGraphBuilder::VisitJumpIfNotUndefinedConstant() {
+ VisitJumpIfNotUndefined();
+}
+void MaglevGraphBuilder::VisitJumpIfUndefinedOrNullConstant() {
+ VisitJumpIfUndefinedOrNull();
+}
+void MaglevGraphBuilder::VisitJumpIfTrueConstant() { VisitJumpIfTrue(); }
+void MaglevGraphBuilder::VisitJumpIfFalseConstant() { VisitJumpIfFalse(); }
+void MaglevGraphBuilder::VisitJumpIfJSReceiverConstant() {
+ VisitJumpIfJSReceiver();
+}
+void MaglevGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
+ VisitJumpIfToBooleanTrue();
+}
+void MaglevGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
+ VisitJumpIfToBooleanFalse();
+}
+
+void MaglevGraphBuilder::MergeIntoFrameState(BasicBlock* predecessor,
+ int target) {
+ if (merge_states_[target] == nullptr) {
+ DCHECK(!bytecode_analysis().IsLoopHeader(target));
+ const compiler::BytecodeLivenessState* liveness =
+ bytecode_analysis().GetInLivenessFor(target);
+ // If there's no target frame state, allocate a new one.
+ merge_states_[target] = zone()->New<MergePointInterpreterFrameState>(
+ *compilation_unit_, current_interpreter_frame_, target,
+ NumPredecessors(target), predecessor, liveness);
+ } else {
+ // If there already is a frame state, merge.
+ merge_states_[target]->Merge(*compilation_unit_, current_interpreter_frame_,
+ predecessor, target);
+ }
+}
+
+void MaglevGraphBuilder::BuildBranchIfTrue(ValueNode* node, int true_target,
+ int false_target) {
+ // TODO(verwaest): Materialize true/false in the respective environments.
+ if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
+ BasicBlock* block = FinishBlock<BranchIfTrue>(next_offset(), {node},
+ &jump_targets_[true_target],
+ &jump_targets_[false_target]);
+ MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+}
+void MaglevGraphBuilder::BuildBranchIfToBooleanTrue(ValueNode* node,
+ int true_target,
+ int false_target) {
+ // TODO(verwaest): Materialize true/false in the respective environments.
+ if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
+ BasicBlock* block = FinishBlock<BranchIfToBooleanTrue>(
+ next_offset(), {node}, &jump_targets_[true_target],
+ &jump_targets_[false_target]);
+ MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+}
+void MaglevGraphBuilder::VisitJumpIfToBooleanTrue() {
+ BuildBranchIfToBooleanTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
+ next_offset());
+}
+void MaglevGraphBuilder::VisitJumpIfToBooleanFalse() {
+ BuildBranchIfToBooleanTrue(GetAccumulator(), next_offset(),
+ iterator_.GetJumpTargetOffset());
+}
+void MaglevGraphBuilder::VisitJumpIfTrue() {
+ BuildBranchIfTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
+ next_offset());
+}
+void MaglevGraphBuilder::VisitJumpIfFalse() {
+ BuildBranchIfTrue(GetAccumulator(), next_offset(),
+ iterator_.GetJumpTargetOffset());
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfNull)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfNotNull)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfUndefined)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfNotUndefined)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfUndefinedOrNull)
+MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfJSReceiver)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SwitchOnSmiNoFeedback)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInEnumerate)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInPrepare)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInContinue)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInNext)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ForInStep)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SetPendingMessage)
+MAGLEV_UNIMPLEMENTED_BYTECODE(Throw)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ReThrow)
+void MaglevGraphBuilder::VisitReturn() {
+ FinishBlock<Return>(next_offset(), {GetAccumulator()});
+}
+MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowReferenceErrorIfHole)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowSuperNotCalledIfHole)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowSuperAlreadyCalledIfNotHole)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowIfNotSuperConstructor)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SwitchOnGeneratorState)
+MAGLEV_UNIMPLEMENTED_BYTECODE(SuspendGenerator)
+MAGLEV_UNIMPLEMENTED_BYTECODE(ResumeGenerator)
+MAGLEV_UNIMPLEMENTED_BYTECODE(GetIterator)
+MAGLEV_UNIMPLEMENTED_BYTECODE(Debugger)
+MAGLEV_UNIMPLEMENTED_BYTECODE(IncBlockCounter)
+MAGLEV_UNIMPLEMENTED_BYTECODE(Abort)
+#define SHORT_STAR_VISITOR(Name, ...) \
+ void MaglevGraphBuilder::Visit##Name() { \
+ StoreRegister( \
+ interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name), \
+ GetAccumulator(), \
+ bytecode_analysis().GetOutLivenessFor(iterator_.current_offset())); \
+ }
+SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
+#undef SHORT_STAR_VISITOR
+
+void MaglevGraphBuilder::VisitWide() { UNREACHABLE(); }
+void MaglevGraphBuilder::VisitExtraWide() { UNREACHABLE(); }
+#define DEBUG_BREAK(Name, ...) \
+ void MaglevGraphBuilder::Visit##Name() { UNREACHABLE(); }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
+#undef DEBUG_BREAK
+void MaglevGraphBuilder::VisitIllegal() { UNREACHABLE(); }
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-graph-builder.h b/deps/v8/src/maglev/maglev-graph-builder.h
new file mode 100644
index 0000000000..da86b80841
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-builder.h
@@ -0,0 +1,383 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
+
+#include <type_traits>
+
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/compiler/heap-refs.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/maglev/maglev-compilation-info.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/utils/memcopy.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevGraphBuilder {
+ public:
+ explicit MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit);
+
+ void Build() {
+ for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) {
+ VisitSingleBytecode();
+ // TODO(v8:7700): Clean up after all bytecodes are supported.
+ if (found_unsupported_bytecode()) break;
+ }
+ }
+
+ Graph* graph() const { return graph_; }
+
+ // TODO(v8:7700): Clean up after all bytecodes are supported.
+ bool found_unsupported_bytecode() const {
+ return found_unsupported_bytecode_;
+ }
+
+ private:
+ BasicBlock* CreateEmptyBlock(int offset, BasicBlock* predecessor) {
+ DCHECK_NULL(current_block_);
+ current_block_ = zone()->New<BasicBlock>(nullptr);
+ BasicBlock* result = CreateBlock<Jump>({}, &jump_targets_[offset]);
+ result->set_empty_block_predecessor(predecessor);
+ return result;
+ }
+
+ void ProcessMergePoint(int offset) {
+ // First copy the merge state to be the current state.
+ MergePointInterpreterFrameState& merge_state = *merge_states_[offset];
+ current_interpreter_frame_.CopyFrom(*compilation_unit_, merge_state);
+
+ if (merge_state.predecessor_count() == 1) return;
+
+ // Set up edge-split.
+ int predecessor_index = merge_state.predecessor_count() - 1;
+ BasicBlockRef* old_jump_targets = jump_targets_[offset].Reset();
+ while (old_jump_targets != nullptr) {
+ BasicBlock* predecessor = merge_state.predecessor_at(predecessor_index);
+ ControlNode* control = predecessor->control_node();
+ if (control->Is<ConditionalControlNode>()) {
+ // CreateEmptyBlock automatically registers itself with the offset.
+ predecessor = CreateEmptyBlock(offset, predecessor);
+ // Set the old predecessor's (the conditional block's) reference to
+ // point to the new empty predecessor block.
+ old_jump_targets =
+ old_jump_targets->SetToBlockAndReturnNext(predecessor);
+ } else {
+ // Re-register the block in the offset's ref list.
+ old_jump_targets =
+ old_jump_targets->MoveToRefList(&jump_targets_[offset]);
+ }
+ predecessor->set_predecessor_id(predecessor_index--);
+ }
+#ifdef DEBUG
+ if (bytecode_analysis().IsLoopHeader(offset)) {
+ // For loops, the JumpLoop block hasn't been generated yet, and so isn't
+ // in the list of jump targets. It's defined to be at index 0, so once
+ // we've processed all the jump targets, the 0 index should be the one
+ // remaining.
+ DCHECK_EQ(predecessor_index, 0);
+ } else {
+ DCHECK_EQ(predecessor_index, -1);
+ }
+#endif
+ if (has_graph_labeller()) {
+ for (Phi* phi : *merge_states_[offset]->phis()) {
+ graph_labeller()->RegisterNode(phi);
+ }
+ }
+ }
+
+ void VisitSingleBytecode() {
+ int offset = iterator_.current_offset();
+ if (V8_UNLIKELY(merge_states_[offset] != nullptr)) {
+ if (current_block_ != nullptr) {
+ DCHECK(!current_block_->nodes().is_empty());
+ FinishBlock<Jump>(offset, {}, &jump_targets_[offset]);
+
+ merge_states_[offset]->Merge(*compilation_unit_,
+ current_interpreter_frame_,
+ graph()->last_block(), offset);
+ }
+ ProcessMergePoint(offset);
+ StartNewBlock(offset);
+ }
+ DCHECK_NOT_NULL(current_block_);
+ switch (iterator_.current_bytecode()) {
+#define BYTECODE_CASE(name, ...) \
+ case interpreter::Bytecode::k##name: \
+ Visit##name(); \
+ break;
+ BYTECODE_LIST(BYTECODE_CASE)
+#undef BYTECODE_CASE
+ }
+ }
+
+#define BYTECODE_VISITOR(name, ...) void Visit##name();
+ BYTECODE_LIST(BYTECODE_VISITOR)
+#undef BYTECODE_VISITOR
+
+ template <typename NodeT>
+ NodeT* AddNode(NodeT* node) {
+ current_block_->nodes().Add(node);
+ return node;
+ }
+
+ template <typename NodeT, typename... Args>
+ NodeT* NewNode(size_t input_count, Args&&... args) {
+ NodeT* node =
+ Node::New<NodeT>(zone(), input_count, std::forward<Args>(args)...);
+ if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
+ return node;
+ }
+
+ template <Operation kOperation, typename... Args>
+ ValueNode* AddNewOperationNode(std::initializer_list<ValueNode*> inputs,
+ Args&&... args);
+
+ template <typename NodeT, typename... Args>
+ NodeT* AddNewNode(size_t input_count, Args&&... args) {
+ return AddNode(NewNode<NodeT>(input_count, std::forward<Args>(args)...));
+ }
+
+ template <typename NodeT, typename... Args>
+ NodeT* NewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
+ NodeT* node = Node::New<NodeT>(zone(), inputs, std::forward<Args>(args)...);
+ if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
+ return node;
+ }
+
+ template <typename NodeT, typename... Args>
+ NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
+ return AddNode(NewNode<NodeT>(inputs, std::forward<Args>(args)...));
+ }
+
+ ValueNode* GetContext() const {
+ return current_interpreter_frame_.get(
+ interpreter::Register::current_context());
+ }
+
+ FeedbackSlot GetSlotOperand(int operand_index) const {
+ return iterator_.GetSlotOperand(operand_index);
+ }
+
+ template <class T, typename = std::enable_if_t<
+ std::is_convertible<T*, Object*>::value>>
+ typename compiler::ref_traits<T>::ref_type GetRefOperand(int operand_index) {
+ return MakeRef(broker(),
+ Handle<T>::cast(iterator_.GetConstantForIndexOperand(
+ operand_index, isolate())));
+ }
+
+ void SetAccumulator(ValueNode* node) {
+ current_interpreter_frame_.set_accumulator(node);
+ }
+
+ ValueNode* GetAccumulator() const {
+ return current_interpreter_frame_.accumulator();
+ }
+
+ ValueNode* LoadRegister(int operand_index) {
+ interpreter::Register source = iterator_.GetRegisterOperand(operand_index);
+ return current_interpreter_frame_.get(source);
+ }
+
+ void StoreRegister(interpreter::Register target, ValueNode* value,
+ const compiler::BytecodeLivenessState* liveness) {
+ if (target.index() >= 0 && !liveness->RegisterIsLive(target.index())) {
+ return;
+ }
+ current_interpreter_frame_.set(target, value);
+ AddNewNode<StoreToFrame>({}, value, target);
+ }
+
+ void AddCheckpoint() {
+ // TODO(v8:7700): Verify this calls the initializer list overload.
+ AddNewNode<Checkpoint>({}, iterator_.current_offset(),
+ GetInLiveness()->AccumulatorIsLive(),
+ GetAccumulator());
+ has_valid_checkpoint_ = true;
+ }
+
+ void EnsureCheckpoint() {
+ if (!has_valid_checkpoint_) AddCheckpoint();
+ }
+
+ void MarkPossibleSideEffect() {
+ // If there was a potential side effect, invalidate the previous checkpoint.
+ has_valid_checkpoint_ = false;
+ }
+
+ int next_offset() const {
+ return iterator_.current_offset() + iterator_.current_bytecode_size();
+ }
+ const compiler::BytecodeLivenessState* GetInLiveness() const {
+ return bytecode_analysis().GetInLivenessFor(iterator_.current_offset());
+ }
+ const compiler::BytecodeLivenessState* GetOutLiveness() const {
+ return bytecode_analysis().GetOutLivenessFor(iterator_.current_offset());
+ }
+
+ void StartNewBlock(int offset) {
+ DCHECK_NULL(current_block_);
+ current_block_ = zone()->New<BasicBlock>(merge_states_[offset]);
+ block_offset_ = offset;
+ }
+
+ template <typename ControlNodeT, typename... Args>
+ BasicBlock* CreateBlock(std::initializer_list<ValueNode*> control_inputs,
+ Args&&... args) {
+ current_block_->set_control_node(NodeBase::New<ControlNodeT>(
+ zone(), control_inputs, std::forward<Args>(args)...));
+
+ BasicBlock* block = current_block_;
+ current_block_ = nullptr;
+
+ graph()->Add(block);
+ if (has_graph_labeller()) {
+ graph_labeller()->RegisterBasicBlock(block);
+ }
+ return block;
+ }
+
+ template <typename ControlNodeT, typename... Args>
+ BasicBlock* FinishBlock(int next_block_offset,
+ std::initializer_list<ValueNode*> control_inputs,
+ Args&&... args) {
+ BasicBlock* block =
+ CreateBlock<ControlNodeT>(control_inputs, std::forward<Args>(args)...);
+
+ // Resolve pointers to this basic block.
+ BasicBlockRef* jump_target_refs_head =
+ jump_targets_[block_offset_].SetToBlockAndReturnNext(block);
+ while (jump_target_refs_head != nullptr) {
+ jump_target_refs_head =
+ jump_target_refs_head->SetToBlockAndReturnNext(block);
+ }
+ DCHECK_EQ(jump_targets_[block_offset_].block_ptr(), block);
+
+ // If the next block has merge states, then it's not a simple fallthrough,
+ // and we should reset the checkpoint validity.
+ if (merge_states_[next_block_offset] != nullptr) {
+ has_valid_checkpoint_ = false;
+ }
+ // Start a new block for the fallthrough path, unless it's a merge point, in
+ // which case we merge our state into it. That merge-point could also be a
+ // loop header, in which case the merge state might not exist yet (if the
+ // only predecessors are this path and the JumpLoop).
+ if (std::is_base_of<ConditionalControlNode, ControlNodeT>::value) {
+ if (NumPredecessors(next_block_offset) == 1) {
+ StartNewBlock(next_block_offset);
+ } else {
+ DCHECK_NULL(current_block_);
+ MergeIntoFrameState(block, next_block_offset);
+ }
+ }
+ return block;
+ }
+
+ template <Operation kOperation>
+ void BuildGenericUnaryOperationNode();
+ template <Operation kOperation>
+ void BuildGenericBinaryOperationNode();
+
+ template <Operation kOperation>
+ void VisitUnaryOperation();
+ template <Operation kOperation>
+ void VisitBinaryOperation();
+
+ void MergeIntoFrameState(BasicBlock* block, int target);
+ void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
+ void BuildBranchIfToBooleanTrue(ValueNode* node, int true_target,
+ int false_target);
+
+ void CalculatePredecessorCounts() {
+ // Allocate one extra slot past the end of the bytecode so we can always
+ // write to the offset after the last bytecode.
+ size_t array_length = bytecode().length() + 1;
+ predecessors_ = zone()->NewArray<uint32_t>(array_length);
+ MemsetUint32(predecessors_, 1, array_length);
+
+ interpreter::BytecodeArrayIterator iterator(bytecode().object());
+ for (; !iterator.done(); iterator.Advance()) {
+ interpreter::Bytecode bytecode = iterator.current_bytecode();
+ if (interpreter::Bytecodes::IsJump(bytecode)) {
+ predecessors_[iterator.GetJumpTargetOffset()]++;
+ if (!interpreter::Bytecodes::IsConditionalJump(bytecode)) {
+ predecessors_[iterator.next_offset()]--;
+ }
+ } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
+ for (auto offset : iterator.GetJumpTableTargetOffsets()) {
+ predecessors_[offset.target_offset]++;
+ }
+ } else if (interpreter::Bytecodes::Returns(bytecode) ||
+ interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
+ predecessors_[iterator.next_offset()]--;
+ }
+ // TODO(leszeks): Also consider handler entries (the bytecode analysis
+ // will do this automatically, I guess, if we merge this into that).
+ }
+ DCHECK_EQ(0, predecessors_[bytecode().length()]);
+ }
+
+ int NumPredecessors(int offset) { return predecessors_[offset]; }
+
+ compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
+ const compiler::FeedbackVectorRef& feedback() const {
+ return compilation_unit_->feedback();
+ }
+ const FeedbackNexus feedback_nexus(int slot_operand_index) const {
+ // TODO(leszeks): Use JSHeapBroker here.
+ return FeedbackNexus(feedback().object(),
+ GetSlotOperand(slot_operand_index));
+ }
+ const compiler::BytecodeArrayRef& bytecode() const {
+ return compilation_unit_->bytecode();
+ }
+ const compiler::BytecodeAnalysis& bytecode_analysis() const {
+ return compilation_unit_->bytecode_analysis();
+ }
+ Isolate* isolate() const { return compilation_unit_->isolate(); }
+ Zone* zone() const { return compilation_unit_->zone(); }
+ int parameter_count() const { return compilation_unit_->parameter_count(); }
+ int register_count() const { return compilation_unit_->register_count(); }
+ bool has_graph_labeller() const {
+ return compilation_unit_->has_graph_labeller();
+ }
+ MaglevGraphLabeller* graph_labeller() const {
+ return compilation_unit_->graph_labeller();
+ }
+
+ MaglevCompilationUnit* const compilation_unit_;
+ interpreter::BytecodeArrayIterator iterator_;
+ uint32_t* predecessors_;
+
+ // Current block information.
+ BasicBlock* current_block_ = nullptr;
+ int block_offset_ = 0;
+ bool has_valid_checkpoint_ = false;
+
+ BasicBlockRef* jump_targets_;
+ MergePointInterpreterFrameState** merge_states_;
+
+ Graph* const graph_;
+ InterpreterFrameState current_interpreter_frame_;
+
+ // Allow marking some bytecodes as unsupported during graph building, so that
+ // we can test maglev incrementally.
+ // TODO(v8:7700): Clean up after all bytecodes are supported.
+ bool found_unsupported_bytecode_ = false;
+ bool this_field_will_be_unused_once_all_bytecodes_are_supported_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
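
A minimal sketch, illustrative only and not part of the patch, of the builder's public surface as a caller such as the Maglev compiler might use it; the wrapper function and the nullptr bail-out convention are assumptions.

// Sketch only: build a Maglev graph for one compilation unit.
Graph* TryBuildMaglevGraph(MaglevCompilationUnit* unit) {
  MaglevGraphBuilder builder(unit);
  builder.Build();
  // While not all bytecodes are supported, hitting an unsupported bytecode
  // aborts the whole build.
  if (builder.found_unsupported_bytecode()) return nullptr;
  return builder.graph();
}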
diff --git a/deps/v8/src/maglev/maglev-graph-labeller.h b/deps/v8/src/maglev/maglev-graph-labeller.h
new file mode 100644
index 0000000000..252b2152ac
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-labeller.h
@@ -0,0 +1,65 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
+
+#include <map>
+
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevGraphLabeller {
+ public:
+ void RegisterNode(const Node* node) {
+ if (node_ids_.emplace(node, next_node_id_).second) {
+ next_node_id_++;
+ }
+ }
+ void RegisterBasicBlock(const BasicBlock* block) {
+ block_ids_[block] = next_block_id_++;
+ if (node_ids_.emplace(block->control_node(), next_node_id_).second) {
+ next_node_id_++;
+ }
+ }
+
+ int BlockId(const BasicBlock* block) { return block_ids_[block]; }
+ int NodeId(const NodeBase* node) { return node_ids_[node]; }
+
+ int max_node_id() const { return next_node_id_ - 1; }
+
+ int max_node_id_width() const { return std::ceil(std::log10(max_node_id())); }
+
+ void PrintNodeLabel(std::ostream& os, const Node* node) {
+ auto node_id_it = node_ids_.find(node);
+
+ if (node_id_it == node_ids_.end()) {
+ os << "<invalid node " << node << ">";
+ return;
+ }
+
+ os << "n" << node_id_it->second;
+ }
+
+ void PrintInput(std::ostream& os, const Input& input) {
+ PrintNodeLabel(os, input.node());
+ os << ":" << input.operand();
+ }
+
+ private:
+ std::map<const BasicBlock*, int> block_ids_;
+ std::map<const NodeBase*, int> node_ids_;
+ int next_block_id_ = 1;
+ int next_node_id_ = 1;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
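
A short sketch, illustrative only and not part of the patch, of typical labeller usage; the helper function is hypothetical, and block/node are assumed to come from an already-built graph.

#include <iostream>

// Sketch only: register a block and one of its nodes, then print their labels.
void DumpOneNode(MaglevGraphLabeller* labeller, const BasicBlock* block,
                 const Node* node) {
  labeller->RegisterBasicBlock(block);  // Assigns the next block id (from 1).
  labeller->RegisterNode(node);         // Assigns the next node id (from 1).
  std::cout << "b" << labeller->BlockId(block) << ": ";
  labeller->PrintNodeLabel(std::cout, node);  // Prints e.g. "n1".
  std::cout << "\n";
}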
diff --git a/deps/v8/src/maglev/maglev-graph-printer.cc b/deps/v8/src/maglev/maglev-graph-printer.cc
new file mode 100644
index 0000000000..ccd7bfbad8
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-printer.cc
@@ -0,0 +1,446 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-graph-printer.h"
+
+#include <initializer_list>
+#include <iomanip>
+#include <ostream>
+#include <type_traits>
+#include <vector>
+
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+namespace {
+
+void PrintPaddedId(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ NodeBase* node, std::string padding = " ",
+ int padding_adjustment = 0) {
+ int id = graph_labeller->NodeId(node);
+ int id_width = std::ceil(std::log10(id + 1));
+ int max_width = graph_labeller->max_node_id_width() + 2 + padding_adjustment;
+ int padding_width = std::max(0, max_width - id_width);
+
+ for (int i = 0; i < padding_width; ++i) {
+ os << padding;
+ }
+ os << graph_labeller->NodeId(node) << ": ";
+}
+
+void PrintPadding(std::ostream& os, int size) {
+ os << std::setfill(' ') << std::setw(size) << "";
+}
+
+void PrintPadding(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ int padding_adjustment = 0) {
+ PrintPadding(os,
+ graph_labeller->max_node_id_width() + 2 + padding_adjustment);
+}
+
+enum ConnectionLocation {
+ kTop = 1 << 0,
+ kLeft = 1 << 1,
+ kRight = 1 << 2,
+ kBottom = 1 << 3
+};
+
+struct Connection {
+ void Connect(ConnectionLocation loc) { connected |= loc; }
+
+ void AddHorizontal() {
+ Connect(kLeft);
+ Connect(kRight);
+ }
+
+ void AddVertical() {
+ Connect(kTop);
+ Connect(kBottom);
+ }
+
+ const char* ToString() const {
+ switch (connected) {
+ case 0:
+ return " ";
+ case kTop:
+ return "╵";
+ case kLeft:
+ return "╴";
+ case kRight:
+ return "╶";
+ case kBottom:
+ return "╷";
+ case kTop | kLeft:
+ return "╯";
+ case kTop | kRight:
+ return "╰";
+ case kBottom | kLeft:
+ return "╮";
+ case kBottom | kRight:
+ return "╭";
+ case kTop | kBottom:
+ return "│";
+ case kLeft | kRight:
+ return "─";
+ case kTop | kBottom | kLeft:
+ return "┤";
+ case kTop | kBottom | kRight:
+ return "├";
+ case kLeft | kRight | kTop:
+ return "┴";
+ case kLeft | kRight | kBottom:
+ return "┬";
+ case kTop | kLeft | kRight | kBottom:
+ return "┼";
+ }
+ UNREACHABLE();
+ }
+
+ uint8_t connected = 0;
+};
+
+std::ostream& operator<<(std::ostream& os, const Connection& c) {
+ return os << c.ToString();
+}
+
+// Print the vertical parts of connection arrows, optionally connecting arrows
+// that were first created on this line (passed in "arrows_starting_here")
+// and should therefore connect rightwards instead of upwards.
+void PrintVerticalArrows(
+ std::ostream& os, const std::vector<BasicBlock*>& targets,
+ const std::set<size_t>& arrows_starting_here = {},
+ const std::set<BasicBlock*>& targets_starting_here = {},
+ bool is_loop = false) {
+ bool saw_start = false;
+ for (size_t i = 0; i < targets.size(); ++i) {
+ Connection c;
+ if (saw_start) {
+ c.AddHorizontal();
+ }
+ if (arrows_starting_here.find(i) != arrows_starting_here.end() ||
+ targets_starting_here.find(targets[i]) != targets_starting_here.end()) {
+ c.Connect(kRight);
+ c.Connect(is_loop ? kTop : kBottom);
+ saw_start = true;
+ }
+
+ // Only add the vertical connection if there was no other connection.
+ if (c.connected == 0 && targets[i] != nullptr) {
+ c.AddVertical();
+ }
+ os << c;
+ }
+}
+
+// Add a target to the target list, in the first free (null) slot after the
+// last occupied position. This might have to extend the target list if there
+// is no free spot.
+size_t AddTarget(std::vector<BasicBlock*>& targets, BasicBlock* target) {
+ if (targets.size() == 0 || targets.back() != nullptr) {
+ targets.push_back(target);
+ return targets.size() - 1;
+ }
+
+ size_t i = targets.size();
+ while (i > 0) {
+ if (targets[i - 1] != nullptr) break;
+ i--;
+ }
+ targets[i] = target;
+ return i;
+}
+
+// If the target is not a fallthrough, add it to the target list in the first
+// free (null) slot after the last occupied position. This might have to
+// extend the target list if there is no free spot. Returns true if it was
+// added, false if it was a fallthrough.
+bool AddTargetIfNotNext(std::vector<BasicBlock*>& targets, BasicBlock* target,
+ BasicBlock* next_block,
+ std::set<size_t>* arrows_starting_here = nullptr) {
+ if (next_block == target) return false;
+ size_t index = AddTarget(targets, target);
+ if (arrows_starting_here != nullptr) arrows_starting_here->insert(index);
+ return true;
+}
+
+class MaglevPrintingVisitorOstream : public std::ostream,
+ private std::streambuf {
+ public:
+ MaglevPrintingVisitorOstream(std::ostream& os,
+ std::vector<BasicBlock*>* targets)
+ : std::ostream(this), os_(os), targets_(targets), padding_size_(0) {}
+ ~MaglevPrintingVisitorOstream() override = default;
+
+ static MaglevPrintingVisitorOstream* cast(
+ const std::unique_ptr<std::ostream>& os) {
+ return static_cast<MaglevPrintingVisitorOstream*>(os.get());
+ }
+
+ void set_padding(int padding_size) { padding_size_ = padding_size; }
+
+ protected:
+ int overflow(int c) override;
+
+ private:
+ std::ostream& os_;
+ std::vector<BasicBlock*>* targets_;
+ int padding_size_;
+ bool previous_was_new_line_ = true;
+};
+
+int MaglevPrintingVisitorOstream::overflow(int c) {
+ if (c == EOF) return c;
+
+ if (previous_was_new_line_) {
+ PrintVerticalArrows(os_, *targets_);
+ PrintPadding(os_, padding_size_);
+ }
+ os_.rdbuf()->sputc(c);
+ previous_was_new_line_ = (c == '\n');
+ return c;
+}
+
+} // namespace
+
+MaglevPrintingVisitor::MaglevPrintingVisitor(std::ostream& os)
+ : os_(os),
+ os_for_additional_info_(new MaglevPrintingVisitorOstream(os_, &targets)) {
+}
+
+void MaglevPrintingVisitor::PreProcessGraph(
+ MaglevCompilationUnit* compilation_unit, Graph* graph) {
+ os_ << "Graph (param count: " << compilation_unit->parameter_count()
+ << ", frame size: " << compilation_unit->register_count() << ")\n\n";
+
+ for (BasicBlock* block : *graph) {
+ if (block->control_node()->Is<JumpLoop>()) {
+ loop_headers.insert(block->control_node()->Cast<JumpLoop>()->target());
+ }
+ }
+
+ // Precalculate the maximum number of targets.
+ for (BlockConstIterator block_it = graph->begin(); block_it != graph->end();
+ ++block_it) {
+ BasicBlock* block = *block_it;
+ std::replace(targets.begin(), targets.end(), block,
+ static_cast<BasicBlock*>(nullptr));
+
+ if (loop_headers.find(block) != loop_headers.end()) {
+ AddTarget(targets, block);
+ }
+ ControlNode* node = block->control_node();
+ if (node->Is<JumpLoop>()) {
+ BasicBlock* target = node->Cast<JumpLoop>()->target();
+ std::replace(targets.begin(), targets.end(), target,
+ static_cast<BasicBlock*>(nullptr));
+ } else if (node->Is<UnconditionalControlNode>()) {
+ AddTargetIfNotNext(targets,
+ node->Cast<UnconditionalControlNode>()->target(),
+ *(block_it + 1));
+ } else if (node->Is<ConditionalControlNode>()) {
+ AddTargetIfNotNext(targets,
+ node->Cast<ConditionalControlNode>()->if_true(),
+ *(block_it + 1));
+ AddTargetIfNotNext(targets,
+ node->Cast<ConditionalControlNode>()->if_false(),
+ *(block_it + 1));
+ }
+ }
+ DCHECK(std::all_of(targets.begin(), targets.end(),
+ [](BasicBlock* block) { return block == nullptr; }));
+}
+
+void MaglevPrintingVisitor::PreProcessBasicBlock(
+ MaglevCompilationUnit* compilation_unit, BasicBlock* block) {
+ MaglevGraphLabeller* graph_labeller = compilation_unit->graph_labeller();
+
+ size_t loop_position = static_cast<size_t>(-1);
+ if (loop_headers.erase(block) > 0) {
+ loop_position = AddTarget(targets, block);
+ }
+ {
+ bool saw_start = false;
+ for (size_t i = 0; i < targets.size(); ++i) {
+ Connection c;
+ if (saw_start) {
+ c.AddHorizontal();
+ }
+ // If this is one of the arrows pointing to this block, terminate the
+ // line by connecting it rightwards.
+ if (targets[i] == block) {
+ c.Connect(kRight);
+ // If this is the loop header, go down instead of up and don't clear
+ // the target.
+ if (i == loop_position) {
+ c.Connect(kBottom);
+ } else {
+ c.Connect(kTop);
+ targets[i] = nullptr;
+ }
+ saw_start = true;
+ } else if (c.connected == 0 && targets[i] != nullptr) {
+ // If this is another arrow, connect it, but only if that doesn't
+ // clobber any existing drawing.
+ c.AddVertical();
+ }
+ os_ << c;
+ }
+ os_ << (saw_start ? "►" : " ");
+ }
+
+ int block_id = graph_labeller->BlockId(block);
+ os_ << "Block b" << block_id << "\n";
+
+ MaglevPrintingVisitorOstream::cast(os_for_additional_info_)->set_padding(1);
+}
+
+void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+
+ PrintVerticalArrows(os_, targets);
+ PrintPaddedId(os_, graph_labeller, phi);
+ os_ << "Phi (";
+ // Manually walk Phi inputs to print just the node labels, without
+ // input locations (which are shown in the predecessor block's gap
+ // moves).
+ for (int i = 0; i < phi->input_count(); ++i) {
+ if (i > 0) os_ << ", ";
+ os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
+ }
+ os_ << ") → " << phi->result().operand() << "\n";
+
+ MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
+ ->set_padding(graph_labeller->max_node_id_width() + 4);
+}
+
+void MaglevPrintingVisitor::Process(Node* node, const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+ PrintVerticalArrows(os_, targets);
+ PrintPaddedId(os_, graph_labeller, node);
+ os_ << PrintNode(graph_labeller, node) << "\n";
+
+ MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
+ ->set_padding(graph_labeller->max_node_id_width() + 4);
+}
+
+void MaglevPrintingVisitor::Process(ControlNode* control_node,
+ const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+
+ bool has_fallthrough = false;
+
+ if (control_node->Is<JumpLoop>()) {
+ BasicBlock* target = control_node->Cast<JumpLoop>()->target();
+
+ PrintVerticalArrows(os_, targets, {}, {target}, true);
+ os_ << "◄─";
+ PrintPaddedId(os_, graph_labeller, control_node, "─", -2);
+ std::replace(targets.begin(), targets.end(), target,
+ static_cast<BasicBlock*>(nullptr));
+
+ } else if (control_node->Is<UnconditionalControlNode>()) {
+ BasicBlock* target =
+ control_node->Cast<UnconditionalControlNode>()->target();
+
+ std::set<size_t> arrows_starting_here;
+ has_fallthrough |= !AddTargetIfNotNext(targets, target, state.next_block(),
+ &arrows_starting_here);
+ PrintVerticalArrows(os_, targets, arrows_starting_here);
+ PrintPaddedId(os_, graph_labeller, control_node,
+ has_fallthrough ? " " : "─");
+
+ } else if (control_node->Is<ConditionalControlNode>()) {
+ BasicBlock* true_target =
+ control_node->Cast<ConditionalControlNode>()->if_true();
+ BasicBlock* false_target =
+ control_node->Cast<ConditionalControlNode>()->if_false();
+
+ std::set<size_t> arrows_starting_here;
+ has_fallthrough |= !AddTargetIfNotNext(
+ targets, false_target, state.next_block(), &arrows_starting_here);
+ has_fallthrough |= !AddTargetIfNotNext(
+ targets, true_target, state.next_block(), &arrows_starting_here);
+ PrintVerticalArrows(os_, targets, arrows_starting_here);
+ PrintPaddedId(os_, graph_labeller, control_node, "─");
+
+ } else {
+ PrintVerticalArrows(os_, targets);
+ PrintPaddedId(os_, graph_labeller, control_node);
+ }
+
+ os_ << PrintNode(graph_labeller, control_node) << "\n";
+
+ bool printed_phis = false;
+ if (control_node->Is<UnconditionalControlNode>()) {
+ BasicBlock* target =
+ control_node->Cast<UnconditionalControlNode>()->target();
+ if (target->has_phi()) {
+ printed_phis = true;
+ PrintVerticalArrows(os_, targets);
+ PrintPadding(os_, graph_labeller, -1);
+ os_ << (has_fallthrough ? "│" : " ");
+ os_ << " with gap moves:\n";
+ int pid = state.block()->predecessor_id();
+ for (Phi* phi : *target->phis()) {
+ PrintVerticalArrows(os_, targets);
+ PrintPadding(os_, graph_labeller, -1);
+ os_ << (has_fallthrough ? "│" : " ");
+ os_ << " - ";
+ graph_labeller->PrintInput(os_, phi->input(pid));
+ os_ << " → " << graph_labeller->NodeId(phi) << ": Phi "
+ << phi->result().operand() << "\n";
+ }
+ }
+ }
+
+ PrintVerticalArrows(os_, targets);
+ if (has_fallthrough) {
+ PrintPadding(os_, graph_labeller, -1);
+ if (printed_phis) {
+ os_ << "▼";
+ } else {
+ os_ << "↓";
+ }
+ }
+ os_ << "\n";
+
+ // TODO(leszeks): Allow MaglevPrintingVisitorOstream to print the arrowhead
+ // so that it overlaps the fallthrough arrow.
+ MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
+ ->set_padding(graph_labeller->max_node_id_width() + 4);
+}
+
+void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
+ Graph* const graph) {
+ GraphProcessor<MaglevPrintingVisitor> printer(compilation_unit, os);
+ printer.ProcessGraph(graph);
+}
+
+void PrintNode::Print(std::ostream& os) const {
+ node_->Print(os, graph_labeller_);
+}
+
+std::ostream& operator<<(std::ostream& os, const PrintNode& printer) {
+ printer.Print(os);
+ return os;
+}
+
+void PrintNodeLabel::Print(std::ostream& os) const {
+ graph_labeller_->PrintNodeLabel(os, node_);
+}
+
+std::ostream& operator<<(std::ostream& os, const PrintNodeLabel& printer) {
+ printer.Print(os);
+ return os;
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-graph-printer.h b/deps/v8/src/maglev/maglev-graph-printer.h
new file mode 100644
index 0000000000..d416293d08
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-printer.h
@@ -0,0 +1,85 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
+
+#include <memory>
+#include <ostream>
+#include <set>
+#include <vector>
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class BasicBlock;
+class ControlNode;
+class Graph;
+class MaglevCompilationUnit;
+class MaglevGraphLabeller;
+class Node;
+class NodeBase;
+class Phi;
+class ProcessingState;
+
+class MaglevPrintingVisitor {
+ public:
+ // Could be interesting to print checkpoints too.
+ static constexpr bool kNeedsCheckpointStates = false;
+
+ explicit MaglevPrintingVisitor(std::ostream& os);
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block);
+ void Process(Phi* phi, const ProcessingState& state);
+ void Process(Node* node, const ProcessingState& state);
+ void Process(ControlNode* node, const ProcessingState& state);
+
+ std::ostream& os() { return *os_for_additional_info_; }
+
+ private:
+ std::ostream& os_;
+ std::unique_ptr<std::ostream> os_for_additional_info_;
+ std::set<BasicBlock*> loop_headers;
+ std::vector<BasicBlock*> targets;
+};
+
+void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
+ Graph* const graph);
+
+class PrintNode {
+ public:
+ PrintNode(MaglevGraphLabeller* graph_labeller, const NodeBase* node)
+ : graph_labeller_(graph_labeller), node_(node) {}
+
+ void Print(std::ostream& os) const;
+
+ private:
+ MaglevGraphLabeller* graph_labeller_;
+ const NodeBase* node_;
+};
+
+std::ostream& operator<<(std::ostream& os, const PrintNode& printer);
+
+class PrintNodeLabel {
+ public:
+ PrintNodeLabel(MaglevGraphLabeller* graph_labeller, const Node* node)
+ : graph_labeller_(graph_labeller), node_(node) {}
+
+ void Print(std::ostream& os) const;
+
+ private:
+ MaglevGraphLabeller* graph_labeller_;
+ const Node* node_;
+};
+
+std::ostream& operator<<(std::ostream& os, const PrintNodeLabel& printer);
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
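
A small sketch, illustrative only and not part of the patch: the two helpers above are designed to be streamed directly, mirroring their use in maglev-graph-printer.cc; the wrapper function is hypothetical.

#include <iostream>

// Sketch only: stream a node's short label and its full printout.
void LogNode(MaglevGraphLabeller* labeller, const Node* node) {
  std::cout << PrintNodeLabel(labeller, node) << ": "
            << PrintNode(labeller, node) << "\n";
}

// The whole graph can be dumped in one call, provided the compilation unit
// has a graph labeller attached:
//   PrintGraph(std::cout, compilation_unit, graph);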
diff --git a/deps/v8/src/maglev/maglev-graph-processor.h b/deps/v8/src/maglev/maglev-graph-processor.h
new file mode 100644
index 0000000000..892fe6071b
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-processor.h
@@ -0,0 +1,423 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_PROCESSOR_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_PROCESSOR_H_
+
+#include "src/compiler/bytecode-analysis.h"
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+// The GraphProcessor takes a NodeProcessor, and applies it to each Node in the
+// Graph by calling NodeProcessor::Process on each Node.
+//
+// The GraphProcessor also keeps track of the current ProcessingState, including
+// the inferred corresponding InterpreterFrameState and (optionally) the state
+// at the most recent Checkpoint, and passes this to the Process method.
+//
+// It expects a NodeProcessor class with:
+//
+// // True if the GraphProcessor should snapshot Checkpoint states for
+// // deopting nodes.
+// static constexpr bool kNeedsCheckpointStates;
+//
+// // A function that processes the graph before the nodes are walked.
+// void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
+//
+// // A function that processes the graph after the nodes are walked.
+// void PostProcessGraph(MaglevCompilationUnit*, Graph* graph);
+//
+// // A function that processes each basic block before its nodes are walked.
+// void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block);
+//
+// // Process methods for each Node type. The GraphProcessor switches over
+// // the Node's opcode, casts it to the appropriate FooNode, and dispatches
+// // to NodeProcessor::Process. It's then up to the NodeProcessor to provide
+// // either distinct Process methods per Node type, or to use templates or
+// // overloading as appropriate to group node processing.
+// void Process(FooNode* node, const ProcessingState& state) {}
+//
+template <typename NodeProcessor>
+class GraphProcessor;
+
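
To make the contract documented above concrete, here is a minimal NodeProcessor sketch, illustrative only and not part of the patch; it uses a single templated Process method, which the comment above explicitly allows as an alternative to per-type overloads.

// Sketch only: a NodeProcessor that just counts the nodes it visits.
class NodeCountingProcessor {
 public:
  static constexpr bool kNeedsCheckpointStates = false;

  void PreProcessGraph(MaglevCompilationUnit*, Graph*) {}
  void PostProcessGraph(MaglevCompilationUnit*, Graph*) {}
  void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock*) {}

  // One templated Process covers Phi, Node and ControlNode subtypes alike.
  template <typename NodeT>
  void Process(NodeT*, const ProcessingState&) {
    count_++;
  }

  int count() const { return count_; }

 private:
  int count_ = 0;
};

// Usage, mirroring PrintGraph in maglev-graph-printer.cc:
//   GraphProcessor<NodeCountingProcessor> counter(compilation_unit);
//   counter.ProcessGraph(graph);
//   int node_count = counter.node_processor().count();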
+class ProcessingState {
+ public:
+ explicit ProcessingState(MaglevCompilationUnit* compilation_unit,
+ BlockConstIterator block_it,
+ const InterpreterFrameState* interpreter_frame_state,
+ const Checkpoint* checkpoint,
+ const InterpreterFrameState* checkpoint_frame_state)
+ : compilation_unit_(compilation_unit),
+ block_it_(block_it),
+ interpreter_frame_state_(interpreter_frame_state),
+ checkpoint_(checkpoint),
+ checkpoint_frame_state_(checkpoint_frame_state) {}
+
+ // Disallow copies, since the underlying frame states stay mutable.
+ ProcessingState(const ProcessingState&) = delete;
+ ProcessingState& operator=(const ProcessingState&) = delete;
+
+ BasicBlock* block() const { return *block_it_; }
+ BasicBlock* next_block() const { return *(block_it_ + 1); }
+
+ const InterpreterFrameState* interpreter_frame_state() const {
+ DCHECK_NOT_NULL(interpreter_frame_state_);
+ return interpreter_frame_state_;
+ }
+
+ const Checkpoint* checkpoint() const {
+ DCHECK_NOT_NULL(checkpoint_);
+ return checkpoint_;
+ }
+
+ const InterpreterFrameState* checkpoint_frame_state() const {
+ DCHECK_NOT_NULL(checkpoint_frame_state_);
+ return checkpoint_frame_state_;
+ }
+
+ int register_count() const { return compilation_unit_->register_count(); }
+ int parameter_count() const { return compilation_unit_->parameter_count(); }
+
+ MaglevGraphLabeller* graph_labeller() const {
+ return compilation_unit_->graph_labeller();
+ }
+
+ private:
+ MaglevCompilationUnit* compilation_unit_;
+ BlockConstIterator block_it_;
+ const InterpreterFrameState* interpreter_frame_state_;
+ const Checkpoint* checkpoint_;
+ const InterpreterFrameState* checkpoint_frame_state_;
+};
+
+template <typename NodeProcessor>
+class GraphProcessor {
+ public:
+ static constexpr bool kNeedsCheckpointStates =
+ NodeProcessor::kNeedsCheckpointStates;
+
+ template <typename... Args>
+ explicit GraphProcessor(MaglevCompilationUnit* compilation_unit,
+ Args&&... args)
+ : compilation_unit_(compilation_unit),
+ node_processor_(std::forward<Args>(args)...),
+ current_frame_state_(*compilation_unit_) {
+ if (kNeedsCheckpointStates) {
+ checkpoint_state_.emplace(*compilation_unit_);
+ }
+ }
+
+ void ProcessGraph(Graph* graph) {
+ graph_ = graph;
+
+ node_processor_.PreProcessGraph(compilation_unit_, graph);
+
+ for (block_it_ = graph->begin(); block_it_ != graph->end(); ++block_it_) {
+ BasicBlock* block = *block_it_;
+
+ node_processor_.PreProcessBasicBlock(compilation_unit_, block);
+
+ if (block->has_state()) {
+ current_frame_state_.CopyFrom(*compilation_unit_, *block->state());
+ if (kNeedsCheckpointStates) {
+ checkpoint_state_->last_checkpoint_block_it = block_it_;
+ checkpoint_state_->last_checkpoint_node_it = NodeConstIterator();
+ }
+ }
+
+ if (block->has_phi()) {
+ for (Phi* phi : *block->phis()) {
+ node_processor_.Process(phi, GetCurrentState());
+ }
+ }
+
+ for (node_it_ = block->nodes().begin(); node_it_ != block->nodes().end();
+ ++node_it_) {
+ Node* node = *node_it_;
+ ProcessNodeBase(node, GetCurrentState());
+ }
+
+ ProcessNodeBase(block->control_node(), GetCurrentState());
+ }
+
+ node_processor_.PostProcessGraph(compilation_unit_, graph);
+ }
+
+ NodeProcessor& node_processor() { return node_processor_; }
+ const NodeProcessor& node_processor() const { return node_processor_; }
+
+ private:
+ ProcessingState GetCurrentState() {
+ return ProcessingState(
+ compilation_unit_, block_it_, &current_frame_state_,
+ kNeedsCheckpointStates ? checkpoint_state_->latest_checkpoint : nullptr,
+ kNeedsCheckpointStates ? &checkpoint_state_->checkpoint_frame_state
+ : nullptr);
+ }
+
+ void ProcessNodeBase(NodeBase* node, const ProcessingState& state) {
+ switch (node->opcode()) {
+#define CASE(OPCODE) \
+ case Opcode::k##OPCODE: \
+ PreProcess(node->Cast<OPCODE>(), state); \
+ node_processor_.Process(node->Cast<OPCODE>(), state); \
+ break;
+ NODE_BASE_LIST(CASE)
+#undef CASE
+ }
+ }
+
+ void PreProcess(NodeBase* node, const ProcessingState& state) {}
+
+ void PreProcess(Checkpoint* checkpoint, const ProcessingState& state) {
+ current_frame_state_.set_accumulator(checkpoint->accumulator());
+ if (kNeedsCheckpointStates) {
+ checkpoint_state_->latest_checkpoint = checkpoint;
+ if (checkpoint->is_used()) {
+ checkpoint_state_->checkpoint_frame_state.CopyFrom(
+ *compilation_unit_, current_frame_state_);
+ checkpoint_state_->last_checkpoint_block_it = block_it_;
+ checkpoint_state_->last_checkpoint_node_it = node_it_;
+ ClearDeadCheckpointNodes();
+ }
+ }
+ }
+
+ void PreProcess(StoreToFrame* store_to_frame, const ProcessingState& state) {
+ current_frame_state_.set(store_to_frame->target(), store_to_frame->value());
+ }
+
+ void PreProcess(SoftDeopt* node, const ProcessingState& state) {
+ PreProcessDeoptingNode();
+ }
+
+ void PreProcess(CheckMaps* node, const ProcessingState& state) {
+ PreProcessDeoptingNode();
+ }
+
+ void PreProcessDeoptingNode() {
+ if (!kNeedsCheckpointStates) return;
+
+ Checkpoint* checkpoint = checkpoint_state_->latest_checkpoint;
+ if (checkpoint->is_used()) {
+ DCHECK(!checkpoint_state_->last_checkpoint_node_it.is_null());
+ DCHECK_EQ(checkpoint, *checkpoint_state_->last_checkpoint_node_it);
+ return;
+ }
+ DCHECK_IMPLIES(!checkpoint_state_->last_checkpoint_node_it.is_null(),
+ checkpoint != *checkpoint_state_->last_checkpoint_node_it);
+
+ // TODO(leszeks): The following code is _ugly_, should figure out how to
+ // clean it up.
+
+ // Go to the previous state checkpoint (either on the Checkpoint that
+ // provided the current checkpoint snapshot, or on a BasicBlock).
+ BlockConstIterator block_it = checkpoint_state_->last_checkpoint_block_it;
+ NodeConstIterator node_it = checkpoint_state_->last_checkpoint_node_it;
+ if (node_it.is_null()) {
+ // There was no recent enough Checkpoint node, and the block iterator
+ // points at a basic block with a state snapshot. Copy that snapshot and
+ // start iterating from there.
+ BasicBlock* block = *block_it;
+ DCHECK(block->has_state());
+ checkpoint_state_->checkpoint_frame_state.CopyFrom(*compilation_unit_,
+ *block->state());
+
+ // Start iterating from the first node in the block.
+ node_it = block->nodes().begin();
+ } else {
+ // The node iterator should point at the previous Checkpoint node. We
+ // don't need that Checkpoint state snapshot anymore, we're making a new
+ // one, so we can just reuse the snapshot as-is without copying it.
+ DCHECK_NE(*node_it, checkpoint);
+ DCHECK((*node_it)->Is<Checkpoint>());
+ DCHECK((*node_it)->Cast<Checkpoint>()->is_used());
+
+ // Advance it by one since we don't need to check this node anymore.
+ ++node_it;
+ }
+
+ // Now walk forward to the checkpoint, and apply any StoreToFrame operations
+ // along the way into the snapshotted checkpoint state.
+ BasicBlock* block = *block_it;
+ while (true) {
+ // Check if we've run out of nodes in this block, and advance to the
+ // next block if so.
+ while (node_it == block->nodes().end()) {
+ DCHECK_NE(block_it, graph_->end());
+
+ // We should only end up visiting blocks with fallthrough to the next
+ // block -- otherwise, the block should have had a frame state snapshot,
+ // as either a merge block or a non-fallthrough jump target.
+ if ((*block_it)->control_node()->Is<Jump>()) {
+ DCHECK_EQ((*block_it)->control_node()->Cast<Jump>()->target(),
+ *(block_it + 1));
+ } else {
+ DCHECK_IMPLIES((*block_it)
+ ->control_node()
+ ->Cast<ConditionalControlNode>()
+ ->if_true() != *(block_it + 1),
+ (*block_it)
+ ->control_node()
+ ->Cast<ConditionalControlNode>()
+ ->if_false() != *(block_it + 1));
+ }
+
+ // Advance to the next block (which the above DCHECKs confirm is the
+ // unconditional fallthrough from the previous block), and update the
+ // cached block pointer.
+ block_it++;
+ block = *block_it;
+
+        // We should never visit a block with state (aside from the very first
+        // block we visit), since that block would have been our starting
+        // point in the first place.
+ DCHECK(!(*block_it)->has_state());
+ node_it = (*block_it)->nodes().begin();
+ }
+
+ // We should never reach the current node, the "until" checkpoint node
+ // should be before it.
+ DCHECK_NE(node_it, node_it_);
+
+ Node* node = *node_it;
+
+ // Break once we hit the given Checkpoint node. This could be right at
+ // the start of the iteration, if the BasicBlock held the snapshot and the
+ // Checkpoint was the first node in it.
+ if (node == checkpoint) break;
+
+ // Update the state from the current node, if it's a state update.
+ if (node->Is<StoreToFrame>()) {
+ StoreToFrame* store_to_frame = node->Cast<StoreToFrame>();
+ checkpoint_state_->checkpoint_frame_state.set(store_to_frame->target(),
+ store_to_frame->value());
+ } else {
+ // Any checkpoints we meet along the way should be unused, otherwise
+ // they should have provided the most recent state snapshot.
+ DCHECK_IMPLIES(node->Is<Checkpoint>(),
+ !node->Cast<Checkpoint>()->is_used());
+ }
+
+ // Continue to the next node.
+ ++node_it;
+ }
+
+ checkpoint_state_->last_checkpoint_block_it = block_it;
+ checkpoint_state_->last_checkpoint_node_it = node_it;
+ checkpoint_state_->checkpoint_frame_state.set_accumulator(
+ checkpoint->accumulator());
+ ClearDeadCheckpointNodes();
+ checkpoint->SetUsed();
+ }
+
+ // Walk the checkpointed state, and null out any values that are dead at this
+ // checkpoint.
+ // TODO(leszeks): Consider doing this on checkpoint copy, not as a
+ // post-process step.
+ void ClearDeadCheckpointNodes() {
+ const compiler::BytecodeLivenessState* liveness =
+ bytecode_analysis().GetInLivenessFor(
+ checkpoint_state_->latest_checkpoint->bytecode_position());
+ for (int i = 0; i < register_count(); ++i) {
+ if (!liveness->RegisterIsLive(i)) {
+ checkpoint_state_->checkpoint_frame_state.set(interpreter::Register(i),
+ nullptr);
+ }
+ }
+
+ // The accumulator is on the checkpoint node itself, and should have already
+ // been nulled out during graph building if it's dead.
+ DCHECK_EQ(
+ !liveness->AccumulatorIsLive(),
+ checkpoint_state_->checkpoint_frame_state.accumulator() == nullptr);
+ }
+
+ int register_count() const { return compilation_unit_->register_count(); }
+ const compiler::BytecodeAnalysis& bytecode_analysis() const {
+ return compilation_unit_->bytecode_analysis();
+ }
+
+ MaglevCompilationUnit* const compilation_unit_;
+ NodeProcessor node_processor_;
+ Graph* graph_;
+ BlockConstIterator block_it_;
+ NodeConstIterator node_it_;
+ InterpreterFrameState current_frame_state_;
+
+  // The CheckpointState is only emplaced if the node processor needs
+  // checkpoint states.
+ struct CheckpointState {
+ explicit CheckpointState(const MaglevCompilationUnit& compilation_unit)
+ : checkpoint_frame_state(compilation_unit) {}
+ Checkpoint* latest_checkpoint = nullptr;
+ BlockConstIterator last_checkpoint_block_it;
+ NodeConstIterator last_checkpoint_node_it;
+ InterpreterFrameState checkpoint_frame_state;
+ };
+ base::Optional<CheckpointState> checkpoint_state_;
+};
+
+// A NodeProcessor that wraps multiple NodeProcessors, and forwards to each of
+// them iteratively.
+template <typename... Processors>
+class NodeMultiProcessor;
+
+template <>
+class NodeMultiProcessor<> {
+ public:
+ static constexpr bool kNeedsCheckpointStates = false;
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+ void Process(NodeBase* node, const ProcessingState& state) {}
+};
+
+template <typename Processor, typename... Processors>
+class NodeMultiProcessor<Processor, Processors...>
+ : NodeMultiProcessor<Processors...> {
+ using Base = NodeMultiProcessor<Processors...>;
+
+ public:
+ static constexpr bool kNeedsCheckpointStates =
+ Processor::kNeedsCheckpointStates || Base::kNeedsCheckpointStates;
+
+ template <typename Node>
+ void Process(Node* node, const ProcessingState& state) {
+ processor_.Process(node, state);
+ Base::Process(node, state);
+ }
+ void PreProcessGraph(MaglevCompilationUnit* unit, Graph* graph) {
+ processor_.PreProcessGraph(unit, graph);
+ Base::PreProcessGraph(unit, graph);
+ }
+ void PostProcessGraph(MaglevCompilationUnit* unit, Graph* graph) {
+ // Post process in reverse order because that kind of makes sense.
+ Base::PostProcessGraph(unit, graph);
+ processor_.PostProcessGraph(unit, graph);
+ }
+ void PreProcessBasicBlock(MaglevCompilationUnit* unit, BasicBlock* block) {
+ processor_.PreProcessBasicBlock(unit, block);
+ Base::PreProcessBasicBlock(unit, block);
+ }
+
+ private:
+ Processor processor_;
+};
+
+template <typename... Processors>
+using GraphMultiProcessor = GraphProcessor<NodeMultiProcessor<Processors...>>;
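+
+// For example (an editorial sketch, not part of this change; the processor
+// names are hypothetical and each must satisfy the NodeProcessor interface
+// documented at the top of this file), two analyses can share one graph walk:
+//
+//   GraphMultiProcessor<NodeCountingProcessor, SomeOtherProcessor> combined(
+//       unit);
+//   combined.ProcessGraph(graph);
+//
+// The combined kNeedsCheckpointStates is the logical OR of the wrapped
+// processors' flags, so checkpoint snapshots are only tracked when at least
+// one of the processors asks for them.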
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_PROCESSOR_H_
diff --git a/deps/v8/src/maglev/maglev-graph.h b/deps/v8/src/maglev/maglev-graph.h
new file mode 100644
index 0000000000..d2fa0726e5
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph.h
@@ -0,0 +1,60 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_H_
+
+#include <vector>
+
+#include "src/maglev/maglev-basic-block.h"
+#include "src/zone/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+using BlockConstIterator =
+ std::vector<BasicBlock*, ZoneAllocator<BasicBlock*>>::const_iterator;
+using BlockConstReverseIterator =
+ std::vector<BasicBlock*,
+ ZoneAllocator<BasicBlock*>>::const_reverse_iterator;
+
+class Graph final : public ZoneObject {
+ public:
+ static Graph* New(Zone* zone) { return zone->New<Graph>(zone); }
+
+ // Shouldn't be used directly; public so that Zone::New can access it.
+ explicit Graph(Zone* zone) : blocks_(zone) {}
+
+ BasicBlock* operator[](int i) { return blocks_[i]; }
+ const BasicBlock* operator[](int i) const { return blocks_[i]; }
+
+ int num_blocks() const { return static_cast<int>(blocks_.size()); }
+
+ BlockConstIterator begin() const { return blocks_.begin(); }
+ BlockConstIterator end() const { return blocks_.end(); }
+ BlockConstReverseIterator rbegin() const { return blocks_.rbegin(); }
+ BlockConstReverseIterator rend() const { return blocks_.rend(); }
+
+ BasicBlock* last_block() const { return blocks_.back(); }
+
+ void Add(BasicBlock* block) { blocks_.push_back(block); }
+
+ uint32_t stack_slots() const { return stack_slots_; }
+ void set_stack_slots(uint32_t stack_slots) {
+ DCHECK_EQ(kMaxUInt32, stack_slots_);
+ DCHECK_NE(kMaxUInt32, stack_slots);
+ stack_slots_ = stack_slots;
+ }
+
+ private:
+ uint32_t stack_slots_ = kMaxUInt32;
+ ZoneVector<BasicBlock*> blocks_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_H_
diff --git a/deps/v8/src/maglev/maglev-interpreter-frame-state.h b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
new file mode 100644
index 0000000000..5a907607f9
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
@@ -0,0 +1,400 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_INTERPRETER_FRAME_STATE_H_
+#define V8_MAGLEV_MAGLEV_INTERPRETER_FRAME_STATE_H_
+
+#include "src/base/logging.h"
+#include "src/base/threaded-list.h"
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc-data.h"
+#include "src/maglev/maglev-register-frame-array.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class BasicBlock;
+class MergePointInterpreterFrameState;
+
+class InterpreterFrameState {
+ public:
+ explicit InterpreterFrameState(const MaglevCompilationUnit& info)
+ : frame_(info) {}
+
+ InterpreterFrameState(const MaglevCompilationUnit& info,
+ const InterpreterFrameState& state)
+ : accumulator_(state.accumulator_), frame_(info) {
+ frame_.CopyFrom(info, state.frame_, nullptr);
+ }
+
+ void CopyFrom(const MaglevCompilationUnit& info,
+ const InterpreterFrameState& state) {
+ accumulator_ = state.accumulator_;
+ frame_.CopyFrom(info, state.frame_, nullptr);
+ }
+
+ inline void CopyFrom(const MaglevCompilationUnit& info,
+ const MergePointInterpreterFrameState& state);
+
+ void set_accumulator(ValueNode* value) { accumulator_ = value; }
+ ValueNode* accumulator() const { return accumulator_; }
+
+ void set(interpreter::Register reg, ValueNode* value) {
+ DCHECK_IMPLIES(reg.is_parameter(),
+ reg == interpreter::Register::current_context() ||
+ reg == interpreter::Register::function_closure() ||
+ reg.ToParameterIndex() >= 0);
+ frame_[reg] = value;
+ }
+ ValueNode* get(interpreter::Register reg) const {
+ DCHECK_IMPLIES(reg.is_parameter(),
+ reg == interpreter::Register::current_context() ||
+ reg == interpreter::Register::function_closure() ||
+ reg.ToParameterIndex() >= 0);
+ return frame_[reg];
+ }
+
+ const RegisterFrameArray<ValueNode*>& frame() const { return frame_; }
+
+ private:
+ ValueNode* accumulator_ = nullptr;
+ RegisterFrameArray<ValueNode*> frame_;
+};
+
+class MergePointRegisterState {
+ public:
+ class Iterator {
+ public:
+ struct Entry {
+ RegisterState& state;
+ Register reg;
+ };
+ explicit Iterator(RegisterState* value_pointer,
+ RegList::Iterator reg_iterator)
+ : current_value_(value_pointer), reg_iterator_(reg_iterator) {}
+ Entry operator*() { return {*current_value_, *reg_iterator_}; }
+ void operator++() {
+ ++current_value_;
+ ++reg_iterator_;
+ }
+ bool operator!=(const Iterator& other) const {
+ return current_value_ != other.current_value_;
+ }
+
+ private:
+ RegisterState* current_value_;
+ RegList::Iterator reg_iterator_;
+ };
+
+ bool is_initialized() const { return values_[0].GetPayload().is_initialized; }
+
+ Iterator begin() {
+ return Iterator(values_, kAllocatableGeneralRegisters.begin());
+ }
+ Iterator end() {
+ return Iterator(values_ + kAllocatableGeneralRegisterCount,
+ kAllocatableGeneralRegisters.end());
+ }
+
+ private:
+ RegisterState values_[kAllocatableGeneralRegisterCount] = {{}};
+};
+
+class MergePointInterpreterFrameState {
+ public:
+ void CheckIsLoopPhiIfNeeded(const MaglevCompilationUnit& compilation_unit,
+ int merge_offset, interpreter::Register reg,
+ ValueNode* value) {
+#ifdef DEBUG
+ const auto& analysis = compilation_unit.bytecode_analysis();
+ if (!analysis.IsLoopHeader(merge_offset)) return;
+ auto& assignments = analysis.GetLoopInfoFor(merge_offset).assignments();
+ if (reg.is_parameter()) {
+ if (!assignments.ContainsParameter(reg.ToParameterIndex())) return;
+ } else {
+ DCHECK(
+ analysis.GetInLivenessFor(merge_offset)->RegisterIsLive(reg.index()));
+ if (!assignments.ContainsLocal(reg.index())) return;
+ }
+ DCHECK(value->Is<Phi>());
+#endif
+ }
+
+ MergePointInterpreterFrameState(
+ const MaglevCompilationUnit& info, const InterpreterFrameState& state,
+ int merge_offset, int predecessor_count, BasicBlock* predecessor,
+ const compiler::BytecodeLivenessState* liveness)
+ : predecessor_count_(predecessor_count),
+ predecessors_so_far_(1),
+ live_registers_and_accumulator_(
+ info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
+ liveness_(liveness),
+ predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
+ int live_index = 0;
+ ForEachRegister(info, [&](interpreter::Register reg) {
+ live_registers_and_accumulator_[live_index++] = state.get(reg);
+ });
+ if (liveness_->AccumulatorIsLive()) {
+ live_registers_and_accumulator_[live_index++] = state.accumulator();
+ }
+ predecessors_[0] = predecessor;
+ }
+
+ MergePointInterpreterFrameState(
+ const MaglevCompilationUnit& info, int merge_offset,
+ int predecessor_count, const compiler::BytecodeLivenessState* liveness,
+ const compiler::LoopInfo* loop_info)
+ : predecessor_count_(predecessor_count),
+ predecessors_so_far_(1),
+ live_registers_and_accumulator_(
+ info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
+ liveness_(liveness),
+ predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
+ int live_index = 0;
+ auto& assignments = loop_info->assignments();
+ ForEachParameter(info, [&](interpreter::Register reg) {
+ ValueNode* value = nullptr;
+ if (assignments.ContainsParameter(reg.ToParameterIndex())) {
+ value = NewLoopPhi(info.zone(), reg, merge_offset, value);
+ }
+ live_registers_and_accumulator_[live_index++] = value;
+ });
+ ForEachLocal([&](interpreter::Register reg) {
+ ValueNode* value = nullptr;
+ if (assignments.ContainsLocal(reg.index())) {
+ value = NewLoopPhi(info.zone(), reg, merge_offset, value);
+ }
+ live_registers_and_accumulator_[live_index++] = value;
+ });
+ DCHECK(!liveness_->AccumulatorIsLive());
+
+#ifdef DEBUG
+ predecessors_[0] = nullptr;
+#endif
+ }
+
+ // Merges an unmerged framestate with a possibly merged framestate into |this|
+ // framestate.
+ void Merge(const MaglevCompilationUnit& compilation_unit,
+ const InterpreterFrameState& unmerged, BasicBlock* predecessor,
+ int merge_offset) {
+ DCHECK_GT(predecessor_count_, 1);
+ DCHECK_LT(predecessors_so_far_, predecessor_count_);
+ predecessors_[predecessors_so_far_] = predecessor;
+
+ ForEachValue(
+ compilation_unit, [&](interpreter::Register reg, ValueNode*& value) {
+ CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
+
+ value = MergeValue(compilation_unit.zone(), reg, value,
+ unmerged.get(reg), merge_offset);
+ });
+ predecessors_so_far_++;
+ DCHECK_LE(predecessors_so_far_, predecessor_count_);
+ }
+
+ MergePointRegisterState& register_state() { return register_state_; }
+
+ // Merges an unmerged framestate with a possibly merged framestate into |this|
+ // framestate.
+ void MergeLoop(const MaglevCompilationUnit& compilation_unit,
+ const InterpreterFrameState& loop_end_state,
+ BasicBlock* loop_end_block, int merge_offset) {
+ DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+ DCHECK_NULL(predecessors_[0]);
+ predecessors_[0] = loop_end_block;
+
+ ForEachValue(
+ compilation_unit, [&](interpreter::Register reg, ValueNode* value) {
+ CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
+
+ MergeLoopValue(compilation_unit.zone(), reg, value,
+ loop_end_state.get(reg), merge_offset);
+ });
+ DCHECK(!liveness_->AccumulatorIsLive());
+ }
+
+ bool has_phi() const { return !phis_.is_empty(); }
+ Phi::List* phis() { return &phis_; }
+
+ void SetPhis(Phi::List&& phis) {
+ // Move the collected phis to the live interpreter frame.
+ DCHECK(phis_.is_empty());
+ phis_.MoveTail(&phis, phis.begin());
+ }
+
+ int predecessor_count() const { return predecessor_count_; }
+
+ BasicBlock* predecessor_at(int i) const {
+ DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+ DCHECK_LT(i, predecessor_count_);
+ return predecessors_[i];
+ }
+
+ private:
+ friend void InterpreterFrameState::CopyFrom(
+ const MaglevCompilationUnit& info,
+ const MergePointInterpreterFrameState& state);
+
+ ValueNode* MergeValue(Zone* zone, interpreter::Register owner,
+ ValueNode* merged, ValueNode* unmerged,
+ int merge_offset) {
+ // If the merged node is null, this is a pre-created loop header merge
+    // frame with null values for anything that isn't a loop Phi.
+ if (merged == nullptr) {
+ DCHECK_NULL(predecessors_[0]);
+ DCHECK_EQ(predecessors_so_far_, 1);
+ return unmerged;
+ }
+
+ Phi* result = merged->TryCast<Phi>();
+ if (result != nullptr && result->merge_offset() == merge_offset) {
+ // It's possible that merged == unmerged at this point since loop-phis are
+ // not dropped if they are only assigned to themselves in the loop.
+ DCHECK_EQ(result->owner(), owner);
+ result->set_input(predecessors_so_far_, unmerged);
+ return result;
+ }
+
+ if (merged == unmerged) return merged;
+
+ // Up to this point all predecessors had the same value for this interpreter
+ // frame slot. Now that we find a distinct value, insert a copy of the first
+ // value for each predecessor seen so far, in addition to the new value.
+ // TODO(verwaest): Unclear whether we want this for Maglev: Instead of
+ // letting the register allocator remove phis, we could always merge through
+ // the frame slot. In that case we only need the inputs for representation
+ // selection, and hence could remove duplicate inputs. We'd likely need to
+ // attach the interpreter register to the phi in that case?
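+    // As a concrete illustration of the merge above (editorial note, not part
+    // of this change): with predecessor_count_ == 4, predecessors_so_far_ == 3,
+    // and a value A shared by the first three predecessors, meeting a new
+    // value B here produces a Phi with inputs (A, A, A, B).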
+ result = Node::New<Phi>(zone, predecessor_count_, owner, merge_offset);
+
+ for (int i = 0; i < predecessors_so_far_; i++) result->set_input(i, merged);
+ result->set_input(predecessors_so_far_, unmerged);
+
+ phis_.Add(result);
+ return result;
+ }
+
+ void MergeLoopValue(Zone* zone, interpreter::Register owner,
+ ValueNode* merged, ValueNode* unmerged,
+ int merge_offset) {
+ Phi* result = merged->TryCast<Phi>();
+ if (result == nullptr || result->merge_offset() != merge_offset) {
+ DCHECK_EQ(merged, unmerged);
+ return;
+ }
+ DCHECK_EQ(result->owner(), owner);
+ // The loop jump is defined to unconditionally be index 0.
+#ifdef DEBUG
+ DCHECK_NULL(result->input(0).node());
+#endif
+ result->set_input(0, unmerged);
+ }
+
+ ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg, int merge_offset,
+ ValueNode* initial_value) {
+ DCHECK_EQ(predecessors_so_far_, 1);
+ // Create a new loop phi, which for now is empty.
+ Phi* result = Node::New<Phi>(zone, predecessor_count_, reg, merge_offset);
+#ifdef DEBUG
+ result->set_input(0, nullptr);
+#endif
+ phis_.Add(result);
+ return result;
+ }
+ static int SizeFor(const MaglevCompilationUnit& info,
+ const compiler::BytecodeLivenessState* liveness) {
+ return info.parameter_count() + liveness->live_value_count();
+ }
+
+ template <typename Function>
+ void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) const {
+ for (int i = 0; i < info.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ f(reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) {
+ for (int i = 0; i < info.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ f(reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachLocal(Function&& f) const {
+ for (int register_index : *liveness_) {
+ interpreter::Register reg = interpreter::Register(register_index);
+ f(reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachLocal(Function&& f) {
+ for (int register_index : *liveness_) {
+ interpreter::Register reg = interpreter::Register(register_index);
+ f(reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
+ ForEachParameter(info, f);
+ ForEachLocal(f);
+ }
+
+ template <typename Function>
+ void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) const {
+ ForEachParameter(info, f);
+ ForEachLocal(f);
+ }
+
+ template <typename Function>
+ void ForEachValue(const MaglevCompilationUnit& info, Function&& f) {
+ int live_index = 0;
+ ForEachRegister(info, [&](interpreter::Register reg) {
+ f(reg, live_registers_and_accumulator_[live_index++]);
+ });
+ if (liveness_->AccumulatorIsLive()) {
+ f(interpreter::Register::virtual_accumulator(),
+ live_registers_and_accumulator_[live_index++]);
+ }
+ DCHECK_EQ(live_index, SizeFor(info, liveness_));
+ }
+
+ int predecessor_count_;
+ int predecessors_so_far_;
+ Phi::List phis_;
+ ValueNode** live_registers_and_accumulator_;
+ const compiler::BytecodeLivenessState* liveness_ = nullptr;
+ BasicBlock** predecessors_;
+
+ MergePointRegisterState register_state_;
+};
+
+void InterpreterFrameState::CopyFrom(
+ const MaglevCompilationUnit& info,
+ const MergePointInterpreterFrameState& state) {
+ int live_index = 0;
+ state.ForEachRegister(info, [&](interpreter::Register reg) {
+ frame_[reg] = state.live_registers_and_accumulator_[live_index++];
+ });
+ if (state.liveness_->AccumulatorIsLive()) {
+ accumulator_ = state.live_registers_and_accumulator_[live_index++];
+ }
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_INTERPRETER_FRAME_STATE_H_
diff --git a/deps/v8/src/maglev/maglev-ir.cc b/deps/v8/src/maglev/maglev-ir.cc
new file mode 100644
index 0000000000..929a748330
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-ir.cc
@@ -0,0 +1,922 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-ir.h"
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/ic/handler-configuration.h"
+#include "src/maglev/maglev-code-gen-state.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-printer.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-vreg-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+const char* ToString(Opcode opcode) {
+#define DEF_NAME(Name) #Name,
+ static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)};
+#undef DEF_NAME
+ return names[static_cast<int>(opcode)];
+}
+
+#define __ code_gen_state->masm()->
+
+// TODO(v8:7700): Clean up after all code paths are supported.
+static bool g_this_field_will_be_unused_once_all_code_paths_are_supported;
+#define UNSUPPORTED() \
+ do { \
+    std::cerr << "Maglev: Can't compile, unsupported codegen path.\n";       \
+ code_gen_state->set_found_unsupported_code_paths(true); \
+ g_this_field_will_be_unused_once_all_code_paths_are_supported = true; \
+ } while (false)
+
+namespace {
+
+// ---
+// Vreg allocation helpers.
+// ---
+
+int GetVirtualRegister(Node* node) {
+ return compiler::UnallocatedOperand::cast(node->result().operand())
+ .virtual_register();
+}
+
+void DefineAsRegister(MaglevVregAllocationState* vreg_state, Node* node) {
+ node->result().SetUnallocated(
+ compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
+ vreg_state->AllocateVirtualRegister());
+}
+
+void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
+ Register reg) {
+ node->result().SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER,
+ reg.code(),
+ vreg_state->AllocateVirtualRegister());
+}
+
+// TODO(victorgomes): Use this for smi binary operation and remove attribute
+// [[maybe_unused]].
+[[maybe_unused]] void DefineSameAsFirst(MaglevVregAllocationState* vreg_state,
+ Node* node) {
+ node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
+}
+
+void UseRegister(Input& input) {
+ input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
+ compiler::UnallocatedOperand::USED_AT_START,
+ GetVirtualRegister(input.node()));
+}
+void UseAny(Input& input) {
+ input.SetUnallocated(
+ compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
+ compiler::UnallocatedOperand::USED_AT_START,
+ GetVirtualRegister(input.node()));
+}
+void UseFixed(Input& input, Register reg) {
+ input.SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER, reg.code(),
+ GetVirtualRegister(input.node()));
+}
+
+// ---
+// Code gen helpers.
+// ---
+
+void PushInput(MaglevCodeGenState* code_gen_state, const Input& input) {
+ // TODO(leszeks): Consider special casing the value. (Toon: could possibly
+ // be done through Input directly?)
+ const compiler::AllocatedOperand& operand =
+ compiler::AllocatedOperand::cast(input.operand());
+
+ if (operand.IsRegister()) {
+ __ Push(operand.GetRegister());
+ } else {
+ DCHECK(operand.IsStackSlot());
+ __ Push(GetStackSlot(operand));
+ }
+}
+
+// ---
+// Deferred code handling.
+// ---
+
+// Base case provides an error.
+template <typename T, typename Enable = void>
+struct CopyForDeferredHelper {
+ template <typename U>
+ struct No_Copy_Helper_Implemented_For_Type;
+ static void Copy(MaglevCompilationUnit* compilation_unit,
+ No_Copy_Helper_Implemented_For_Type<T>);
+};
+
+// Helper for copies by value.
+template <typename T, typename Enable = void>
+struct CopyForDeferredByValue {
+ static T Copy(MaglevCompilationUnit* compilation_unit, T node) {
+ return node;
+ }
+};
+
+// Node pointers are copied by value.
+template <typename T>
+struct CopyForDeferredHelper<
+ T*, typename std::enable_if<std::is_base_of<NodeBase, T>::value>::type>
+ : public CopyForDeferredByValue<T*> {};
+// Arithmetic values and enums are copied by value.
+template <typename T>
+struct CopyForDeferredHelper<
+ T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
+ : public CopyForDeferredByValue<T> {};
+template <typename T>
+struct CopyForDeferredHelper<
+ T, typename std::enable_if<std::is_enum<T>::value>::type>
+ : public CopyForDeferredByValue<T> {};
+// MaglevCompilationUnits are copied by value.
+template <>
+struct CopyForDeferredHelper<MaglevCompilationUnit*>
+ : public CopyForDeferredByValue<MaglevCompilationUnit*> {};
+// Machine registers are copied by value.
+template <>
+struct CopyForDeferredHelper<Register>
+ : public CopyForDeferredByValue<Register> {};
+
+// InterpreterFrameState is cloned.
+template <>
+struct CopyForDeferredHelper<const InterpreterFrameState*> {
+ static const InterpreterFrameState* Copy(
+ MaglevCompilationUnit* compilation_unit,
+ const InterpreterFrameState* frame_state) {
+ return compilation_unit->zone()->New<InterpreterFrameState>(
+ *compilation_unit, *frame_state);
+ }
+};
+
+template <typename T>
+T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T&& value) {
+ return CopyForDeferredHelper<T>::Copy(compilation_unit,
+ std::forward<T>(value));
+}
+
+template <typename T>
+T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T& value) {
+ return CopyForDeferredHelper<T>::Copy(compilation_unit, value);
+}
+
+template <typename T>
+T CopyForDeferred(MaglevCompilationUnit* compilation_unit, const T& value) {
+ return CopyForDeferredHelper<T>::Copy(compilation_unit, value);
+}
+
+template <typename Function, typename FunctionPointer = Function>
+struct FunctionArgumentsTupleHelper
+ : FunctionArgumentsTupleHelper<Function,
+ decltype(&FunctionPointer::operator())> {};
+
+template <typename T, typename C, typename R, typename... A>
+struct FunctionArgumentsTupleHelper<T, R (C::*)(A...) const> {
+ using FunctionPointer = R (*)(A...);
+ using Tuple = std::tuple<A...>;
+ static constexpr size_t kSize = sizeof...(A);
+};
+
+template <typename T>
+struct StripFirstTwoTupleArgs;
+
+template <typename T1, typename T2, typename... T>
+struct StripFirstTwoTupleArgs<std::tuple<T1, T2, T...>> {
+ using Stripped = std::tuple<T...>;
+};
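+
+// As an illustration of the two helpers above (an editorial note, not part of
+// this change), a deferred lambda such as
+//
+//   [](MaglevCodeGenState* code_gen_state, Label* return_label,
+//      Register object, int offset) { /* ... */ }
+//
+// is deduced by FunctionArgumentsTupleHelper as
+//   FunctionPointer = void (*)(MaglevCodeGenState*, Label*, Register, int)
+//   Tuple           = std::tuple<MaglevCodeGenState*, Label*, Register, int>
+//   kSize           = 4
+// and StripFirstTwoTupleArgs then reduces Tuple to std::tuple<Register, int>,
+// which is what DeferredCodeInfoImpl below stores (after CopyForDeferred) and
+// reapplies in DoCall together with the code gen state and return label.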
+
+template <typename Function>
+class DeferredCodeInfoImpl final : public MaglevCodeGenState::DeferredCodeInfo {
+ public:
+ using FunctionPointer =
+ typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
+ using Tuple = typename StripFirstTwoTupleArgs<
+ typename FunctionArgumentsTupleHelper<Function>::Tuple>::Stripped;
+ static constexpr size_t kSize = FunctionArgumentsTupleHelper<Function>::kSize;
+
+ template <typename... InArgs>
+ explicit DeferredCodeInfoImpl(MaglevCompilationUnit* compilation_unit,
+ FunctionPointer function, InArgs&&... args)
+ : function(function),
+ args(CopyForDeferred(compilation_unit, std::forward<InArgs>(args))...) {
+ }
+
+ DeferredCodeInfoImpl(DeferredCodeInfoImpl&&) = delete;
+ DeferredCodeInfoImpl(const DeferredCodeInfoImpl&) = delete;
+
+ void Generate(MaglevCodeGenState* code_gen_state,
+ Label* return_label) override {
+ DoCall(code_gen_state, return_label, std::make_index_sequence<kSize - 2>{});
+ }
+
+ private:
+ template <size_t... I>
+ auto DoCall(MaglevCodeGenState* code_gen_state, Label* return_label,
+ std::index_sequence<I...>) {
+ // TODO(leszeks): This could be replaced with std::apply in C++17.
+ return function(code_gen_state, return_label, std::get<I>(args)...);
+ }
+
+ FunctionPointer function;
+ Tuple args;
+};
+
+template <typename Function, typename... Args>
+void JumpToDeferredIf(Condition cond, MaglevCodeGenState* code_gen_state,
+ Function&& deferred_code_gen, Args&&... args) {
+ using DeferredCodeInfoT = DeferredCodeInfoImpl<Function>;
+ DeferredCodeInfoT* deferred_code =
+ code_gen_state->compilation_unit()->zone()->New<DeferredCodeInfoT>(
+ code_gen_state->compilation_unit(), deferred_code_gen,
+ std::forward<Args>(args)...);
+
+ code_gen_state->PushDeferredCode(deferred_code);
+ if (FLAG_code_comments) {
+ __ RecordComment("-- Jump to deferred code");
+ }
+ __ j(cond, &deferred_code->deferred_code_label);
+ __ bind(&deferred_code->return_label);
+}
+
+// ---
+// Deopt
+// ---
+
+void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
+ int deopt_bytecode_position,
+ const InterpreterFrameState* checkpoint_state) {
+ DCHECK(node->properties().can_deopt());
+ // TODO(leszeks): Extract to separate call, or at the very least defer.
+
+ // TODO(leszeks): Stack check.
+ MaglevCompilationUnit* compilation_unit = code_gen_state->compilation_unit();
+ int maglev_frame_size = code_gen_state->vreg_slots();
+
+ ASM_CODE_COMMENT_STRING(code_gen_state->masm(), "Deoptimize");
+ __ RecordComment("Push registers and load accumulator");
+ int num_saved_slots = 0;
+ // TODO(verwaest): We probably shouldn't be spilling all values that go
+ // through deopt :)
+ for (int i = 0; i < compilation_unit->register_count(); ++i) {
+ ValueNode* node = checkpoint_state->get(interpreter::Register(i));
+ if (node == nullptr) continue;
+ __ Push(ToMemOperand(node->spill_slot()));
+ num_saved_slots++;
+ }
+ ValueNode* accumulator = checkpoint_state->accumulator();
+ if (accumulator) {
+ __ movq(kInterpreterAccumulatorRegister,
+ ToMemOperand(accumulator->spill_slot()));
+ }
+
+ __ RecordComment("Load registers from extra pushed slots");
+ int slot = 0;
+ for (int i = 0; i < compilation_unit->register_count(); ++i) {
+ ValueNode* node = checkpoint_state->get(interpreter::Register(i));
+ if (node == nullptr) continue;
+ __ movq(kScratchRegister, MemOperand(rsp, (num_saved_slots - slot++ - 1) *
+ kSystemPointerSize));
+ __ movq(MemOperand(rbp, InterpreterFrameConstants::kRegisterFileFromFp -
+ i * kSystemPointerSize),
+ kScratchRegister);
+ }
+ DCHECK_EQ(slot, num_saved_slots);
+
+ __ RecordComment("Materialize bytecode array and offset");
+ __ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp),
+ compilation_unit->bytecode().object());
+ __ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
+ Smi::FromInt(deopt_bytecode_position +
+ (BytecodeArray::kHeaderSize - kHeapObjectTag)));
+
+ // Reset rsp to bytecode sized frame.
+ __ addq(rsp, Immediate((maglev_frame_size + num_saved_slots -
+ (2 + compilation_unit->register_count())) *
+ kSystemPointerSize));
+ __ TailCallBuiltin(Builtin::kBaselineOrInterpreterEnterAtBytecode);
+}
+
+void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
+ const ProcessingState& state) {
+ EmitDeopt(code_gen_state, node, state.checkpoint()->bytecode_position(),
+ state.checkpoint_frame_state());
+}
+
+// ---
+// Print
+// ---
+
+void PrintInputs(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const NodeBase* node) {
+ if (!node->has_inputs()) return;
+
+ os << " [";
+ for (int i = 0; i < node->input_count(); i++) {
+ if (i != 0) os << ", ";
+ graph_labeller->PrintInput(os, node->input(i));
+ }
+ os << "]";
+}
+
+void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const NodeBase* node) {}
+
+void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const ValueNode* node) {
+ os << " → " << node->result().operand();
+ if (node->has_valid_live_range()) {
+ os << ", live range: [" << node->live_range().start << "-"
+ << node->live_range().end << "]";
+ }
+}
+
+void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const NodeBase* node) {}
+
+void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const UnconditionalControlNode* node) {
+ os << " b" << graph_labeller->BlockId(node->target());
+}
+
+void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const ConditionalControlNode* node) {
+ os << " b" << graph_labeller->BlockId(node->if_true()) << " b"
+ << graph_labeller->BlockId(node->if_false());
+}
+
+template <typename NodeT>
+void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller,
+ const NodeT* node) {
+ os << node->opcode();
+ node->PrintParams(os, graph_labeller);
+ PrintInputs(os, graph_labeller, node);
+ PrintResult(os, graph_labeller, node);
+ PrintTargets(os, graph_labeller, node);
+}
+
+} // namespace
+
+void NodeBase::Print(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ switch (opcode()) {
+#define V(Name) \
+ case Opcode::k##Name: \
+ return PrintImpl(os, graph_labeller, this->Cast<Name>());
+ NODE_BASE_LIST(V)
+#undef V
+ }
+ UNREACHABLE();
+}
+
+// ---
+// Nodes
+// ---
+void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsRegister(vreg_state, this);
+}
+void SmiConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ __ Move(ToRegister(result()), Immediate(value()));
+}
+void SmiConstant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << value() << ")";
+}
+
+void Checkpoint::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void Checkpoint::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {}
+void Checkpoint::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << PrintNodeLabel(graph_labeller, accumulator()) << ")";
+}
+
+void SoftDeopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void SoftDeopt::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ EmitDeopt(code_gen_state, this, state);
+}
+
+void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsRegister(vreg_state, this);
+}
+void Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ UNREACHABLE();
+}
+void Constant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << object_ << ")";
+}
+
+void InitialValue::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ // TODO(leszeks): Make this nicer.
+ result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT,
+ (StandardFrameConstants::kExpressionsOffset -
+ UnoptimizedFrameConstants::kRegisterFileFromFp) /
+ kSystemPointerSize +
+ source().index(),
+ vreg_state->AllocateVirtualRegister());
+}
+void InitialValue::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // No-op, the value is already in the appropriate slot.
+}
+void InitialValue::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << source().ToString() << ")";
+}
+
+void LoadGlobal::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseFixed(context(), kContextRegister);
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+void LoadGlobal::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.
+
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+
+ // TODO(jgruber): Detect properly.
+ const int ic_kind =
+ static_cast<int>(FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
+
+ __ Move(LoadGlobalNoFeedbackDescriptor::GetRegisterParameter(
+ LoadGlobalNoFeedbackDescriptor::kName),
+ name().object());
+ __ Move(LoadGlobalNoFeedbackDescriptor::GetRegisterParameter(
+ LoadGlobalNoFeedbackDescriptor::kICKind),
+ Immediate(Smi::FromInt(ic_kind)));
+
+ // TODO(jgruber): Implement full LoadGlobal handling.
+ __ CallBuiltin(Builtin::kLoadGlobalIC_NoFeedback);
+}
+void LoadGlobal::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name() << ")";
+}
+
+void RegisterInput::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsFixed(vreg_state, this, input());
+}
+void RegisterInput::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // Nothing to be done, the value is already in the register.
+}
+void RegisterInput::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << input() << ")";
+}
+
+void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsRegister(vreg_state, this);
+}
+void RootConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ if (!has_valid_live_range()) return;
+
+ Register reg = ToRegister(result());
+ __ LoadRoot(reg, index());
+}
+void RootConstant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << RootsTable::name(index()) << ")";
+}
+
+void CheckMaps::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(actual_map_input());
+ set_temporaries_needed(1);
+}
+void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register object = ToRegister(actual_map_input());
+ RegList temps = temporaries();
+ Register map_tmp = temps.PopFirst();
+
+ __ LoadMap(map_tmp, object);
+ __ Cmp(map_tmp, map().object());
+
+ // TODO(leszeks): Encode as a bit on CheckMaps.
+ if (map().object()->is_migration_target()) {
+ JumpToDeferredIf(
+ not_equal, code_gen_state,
+ [](MaglevCodeGenState* code_gen_state, Label* return_label,
+ Register object, CheckMaps* node, int checkpoint_position,
+ const InterpreterFrameState* checkpoint_state_snapshot,
+ Register map_tmp) {
+ Label deopt;
+
+ // If the map is not deprecated, deopt straight away.
+ __ movl(kScratchRegister,
+ FieldOperand(map_tmp, Map::kBitField3Offset));
+ __ testl(kScratchRegister,
+ Immediate(Map::Bits3::IsDeprecatedBit::kMask));
+ __ j(zero, &deopt);
+
+ // Otherwise, try migrating the object. If the migration returns Smi
+ // zero, then it failed and we should deopt.
+ __ Push(object);
+ __ Move(kContextRegister,
+ code_gen_state->broker()->target_native_context().object());
+          // TODO(verwaest): We're making a call here, so we need to spill
+          // around it.
+ __ CallRuntime(Runtime::kTryMigrateInstance);
+ __ cmpl(kReturnRegister0, Immediate(0));
+ __ j(equal, &deopt);
+
+ // The migrated object is returned on success, retry the map check.
+ __ Move(object, kReturnRegister0);
+ __ LoadMap(map_tmp, object);
+ __ Cmp(map_tmp, node->map().object());
+ __ j(equal, return_label);
+
+ __ bind(&deopt);
+ EmitDeopt(code_gen_state, node, checkpoint_position,
+ checkpoint_state_snapshot);
+ },
+ object, this, state.checkpoint()->bytecode_position(),
+ state.checkpoint_frame_state(), map_tmp);
+ } else {
+ Label is_ok;
+ __ j(equal, &is_ok);
+ EmitDeopt(code_gen_state, this, state);
+ __ bind(&is_ok);
+ }
+}
+void CheckMaps::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << *map().object() << ")";
+}
+
+void LoadField::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(object_input());
+ DefineAsRegister(vreg_state, this);
+}
+void LoadField::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // os << "kField, is in object = "
+ // << LoadHandler::IsInobjectBits::decode(raw_handler)
+ // << ", is double = " << LoadHandler::IsDoubleBits::decode(raw_handler)
+ // << ", field index = " <<
+ // LoadHandler::FieldIndexBits::decode(raw_handler);
+
+ Register object = ToRegister(object_input());
+ int handler = this->handler();
+
+ if (LoadHandler::IsInobjectBits::decode(handler)) {
+ Operand input_field_operand = FieldOperand(
+ object, LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize);
+ __ DecompressAnyTagged(ToRegister(result()), input_field_operand);
+ if (LoadHandler::IsDoubleBits::decode(handler)) {
+ // TODO(leszeks): Copy out the value, either as a double or a HeapNumber.
+ UNSUPPORTED();
+ }
+ } else {
+ // TODO(leszeks): Handle out-of-object properties.
+ UNSUPPORTED();
+ }
+}
+void LoadField::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << std::hex << handler() << std::dec << ")";
+}
+
+void StoreField::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(object_input());
+ UseRegister(value_input());
+}
+void StoreField::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register value = ToRegister(value_input());
+
+ if (StoreHandler::IsInobjectBits::decode(this->handler())) {
+ Operand operand = FieldOperand(
+ object,
+ StoreHandler::FieldIndexBits::decode(this->handler()) * kTaggedSize);
+ __ StoreTaggedField(operand, value);
+ } else {
+ // TODO(victorgomes): Out-of-object properties.
+ UNSUPPORTED();
+ }
+}
+
+void StoreField::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << std::hex << handler() << std::dec << ")";
+}
+
+void LoadNamedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ using D = LoadNoFeedbackDescriptor;
+ UseFixed(context(), kContextRegister);
+ UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+void LoadNamedGeneric::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ using D = LoadNoFeedbackDescriptor;
+ const int ic_kind = static_cast<int>(FeedbackSlotKind::kLoadProperty);
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+ DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver));
+ __ Move(D::GetRegisterParameter(D::kName), name().object());
+ __ Move(D::GetRegisterParameter(D::kICKind),
+ Immediate(Smi::FromInt(ic_kind)));
+ __ CallBuiltin(Builtin::kLoadIC_NoFeedback);
+}
+void LoadNamedGeneric::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << name_ << ")";
+}
+
+void StoreToFrame::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void StoreToFrame::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {}
+void StoreToFrame::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << target().ToString() << " ← "
+ << PrintNodeLabel(graph_labeller, value()) << ")";
+}
+
+void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UNREACHABLE();
+}
+void GapMove::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ if (source().IsAnyRegister()) {
+ Register source_reg = ToRegister(source());
+ if (target().IsAnyRegister()) {
+ __ movq(ToRegister(target()), source_reg);
+ } else {
+ __ movq(ToMemOperand(target()), source_reg);
+ }
+ } else {
+ MemOperand source_op = ToMemOperand(source());
+ if (target().IsAnyRegister()) {
+ __ movq(ToRegister(target()), source_op);
+ } else {
+ __ movq(kScratchRegister, source_op);
+ __ movq(ToMemOperand(target()), kScratchRegister);
+ }
+ }
+}
+void GapMove::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << source() << " → " << target() << ")";
+}
+
+namespace {
+
+constexpr Builtin BuiltinFor(Operation operation) {
+ switch (operation) {
+#define CASE(name) \
+ case Operation::k##name: \
+ return Builtin::k##name##_WithFeedback;
+ OPERATION_LIST(CASE)
+#undef CASE
+ }
+}
+
+} // namespace
+
+template <class Derived, Operation kOperation>
+void UnaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
+ MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
+ using D = UnaryOp_WithFeedbackDescriptor;
+ UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+
+template <class Derived, Operation kOperation>
+void UnaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
+ MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
+ using D = UnaryOp_WithFeedbackDescriptor;
+ DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
+ __ Move(kContextRegister, code_gen_state->native_context().object());
+ __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
+ __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
+ __ CallBuiltin(BuiltinFor(kOperation));
+}
+
+template <class Derived, Operation kOperation>
+void BinaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
+ MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
+ using D = BinaryOp_WithFeedbackDescriptor;
+ UseFixed(left_input(), D::GetRegisterParameter(D::kLeft));
+ UseFixed(right_input(), D::GetRegisterParameter(D::kRight));
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+
+template <class Derived, Operation kOperation>
+void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
+ MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
+ using D = BinaryOp_WithFeedbackDescriptor;
+ DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft));
+ DCHECK_EQ(ToRegister(right_input()), D::GetRegisterParameter(D::kRight));
+ __ Move(kContextRegister, code_gen_state->native_context().object());
+ __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
+ __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
+ __ CallBuiltin(BuiltinFor(kOperation));
+}
+
+#define DEF_OPERATION(Name) \
+ void Name::AllocateVreg(MaglevVregAllocationState* vreg_state, \
+ const ProcessingState& state) { \
+ Base::AllocateVreg(vreg_state, state); \
+ } \
+ void Name::GenerateCode(MaglevCodeGenState* code_gen_state, \
+ const ProcessingState& state) { \
+ Base::GenerateCode(code_gen_state, state); \
+ }
+GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
+#undef DEF_OPERATION
+
+void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ // Phi inputs are processed in the post-process, once loop phis' inputs'
+ // v-regs are allocated.
+ result().SetUnallocated(
+ compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
+ vreg_state->AllocateVirtualRegister());
+}
+// TODO(verwaest): Remove after switching the register allocator.
+void Phi::AllocateVregInPostProcess(MaglevVregAllocationState* vreg_state) {
+ for (Input& input : *this) {
+ UseAny(input);
+ }
+}
+void Phi::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ DCHECK_EQ(state.interpreter_frame_state()->get(owner()), this);
+}
+void Phi::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << owner().ToString() << ")";
+}
+
+void CallProperty::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseFixed(function(), CallTrampolineDescriptor::GetRegisterParameter(
+ CallTrampolineDescriptor::kFunction));
+ UseFixed(context(), kContextRegister);
+ for (int i = 0; i < num_args(); i++) {
+ UseAny(arg(i));
+ }
+ DefineAsFixed(vreg_state, this, kReturnRegister0);
+}
+void CallProperty::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.
+
+ DCHECK_EQ(ToRegister(function()),
+ CallTrampolineDescriptor::GetRegisterParameter(
+ CallTrampolineDescriptor::kFunction));
+ DCHECK_EQ(ToRegister(context()), kContextRegister);
+
+ for (int i = num_args() - 1; i >= 0; --i) {
+ PushInput(code_gen_state, arg(i));
+ }
+
+ uint32_t arg_count = num_args();
+ __ Move(CallTrampolineDescriptor::GetRegisterParameter(
+ CallTrampolineDescriptor::kActualArgumentsCount),
+ Immediate(arg_count));
+
+  // TODO(leszeks): This doesn't collect feedback yet; the feedback vector
+  // could be passed in by Handle.
+ __ CallBuiltin(Builtin::kCall_ReceiverIsNotNullOrUndefined);
+}
+
+void CallUndefinedReceiver::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UNREACHABLE();
+}
+void CallUndefinedReceiver::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ UNREACHABLE();
+}
+
+// ---
+// Control nodes
+// ---
+void Return::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseFixed(value_input(), kReturnRegister0);
+}
+void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
+
+ __ LeaveFrame(StackFrame::BASELINE);
+ __ Ret(code_gen_state->parameter_count() * kSystemPointerSize,
+ kScratchRegister);
+}
+
+void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void Jump::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ // Avoid emitting a jump to the next block.
+ if (target() != state.next_block()) {
+ __ jmp(target()->label());
+ }
+}
+
+void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void JumpLoop::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ __ jmp(target()->label());
+}
+
+void BranchIfTrue::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(condition_input());
+}
+void BranchIfTrue::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register value = ToRegister(condition_input());
+
+ auto* next_block = state.next_block();
+
+ // We don't have any branch probability information, so try to jump
+ // over whatever block is emitted next.
+ if (if_false() == next_block) {
+ // Jump over the false block if true, otherwise fall through into it.
+ __ JumpIfRoot(value, RootIndex::kTrueValue, if_true()->label());
+ } else {
+ // Jump to the false block if true.
+ __ JumpIfNotRoot(value, RootIndex::kTrueValue, if_false()->label());
+ // Jump to the true block if it's not the next block.
+ if (if_true() != next_block) {
+ __ jmp(if_true()->label());
+ }
+ }
+}
+
+void BranchIfCompare::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void BranchIfCompare::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ USE(operation_);
+ UNREACHABLE();
+}
+
+void BranchIfToBooleanTrue::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseFixed(condition_input(),
+ ToBooleanForBaselineJumpDescriptor::GetRegisterParameter(0));
+}
+void BranchIfToBooleanTrue::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ DCHECK_EQ(ToRegister(condition_input()),
+ ToBooleanForBaselineJumpDescriptor::GetRegisterParameter(0));
+
+ // ToBooleanForBaselineJump returns the ToBoolean value into return reg 1, and
+ // the original value into kInterpreterAccumulatorRegister, so we don't have
+ // to worry about it getting clobbered.
+ __ CallBuiltin(Builtin::kToBooleanForBaselineJump);
+ __ SmiCompare(kReturnRegister1, Smi::zero());
+
+ auto* next_block = state.next_block();
+
+ // We don't have any branch probability information, so try to jump
+ // over whatever block is emitted next.
+ if (if_false() == next_block) {
+ // Jump over the false block if non-zero, otherwise fall through into it.
+ __ j(not_equal, if_true()->label());
+ } else {
+ // Jump to the false block if zero.
+ __ j(equal, if_false()->label());
+ // Fall through or jump to the true block.
+ if (if_true() != next_block) {
+ __ jmp(if_true()->label());
+ }
+ }
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-ir.h b/deps/v8/src/maglev/maglev-ir.h
new file mode 100644
index 0000000000..398f9254d9
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-ir.h
@@ -0,0 +1,1461 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_IR_H_
+#define V8_MAGLEV_MAGLEV_IR_H_
+
+#include "src/base/bit-field.h"
+#include "src/base/macros.h"
+#include "src/base/small-vector.h"
+#include "src/base/threaded-list.h"
+#include "src/codegen/reglist.h"
+#include "src/common/globals.h"
+#include "src/common/operation.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/compiler/heap-refs.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/objects/smi.h"
+#include "src/roots/roots.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class BasicBlock;
+class ProcessingState;
+class MaglevCodeGenState;
+class MaglevGraphLabeller;
+class MaglevVregAllocationState;
+
+// Nodes are either
+// 1. side-effecting or value-holding SSA nodes in the body of basic blocks, or
+// 2. Control nodes that store the control flow at the end of basic blocks, and
+// form a separate node hierarchy to non-control nodes.
+//
+// The macro lists below must match the node class hierarchy.
+
+#define GENERIC_OPERATIONS_NODE_LIST(V) \
+ V(GenericAdd) \
+ V(GenericSubtract) \
+ V(GenericMultiply) \
+ V(GenericDivide) \
+ V(GenericModulus) \
+ V(GenericExponentiate) \
+ V(GenericBitwiseAnd) \
+ V(GenericBitwiseOr) \
+ V(GenericBitwiseXor) \
+ V(GenericShiftLeft) \
+ V(GenericShiftRight) \
+ V(GenericShiftRightLogical) \
+ V(GenericBitwiseNot) \
+ V(GenericNegate) \
+ V(GenericIncrement) \
+ V(GenericDecrement) \
+ V(GenericEqual) \
+ V(GenericStrictEqual) \
+ V(GenericLessThan) \
+ V(GenericLessThanOrEqual) \
+ V(GenericGreaterThan) \
+ V(GenericGreaterThanOrEqual)
+
+#define VALUE_NODE_LIST(V) \
+ V(CallProperty) \
+ V(CallUndefinedReceiver) \
+ V(Constant) \
+ V(InitialValue) \
+ V(LoadField) \
+ V(LoadGlobal) \
+ V(LoadNamedGeneric) \
+ V(Phi) \
+ V(RegisterInput) \
+ V(RootConstant) \
+ V(SmiConstant) \
+ GENERIC_OPERATIONS_NODE_LIST(V)
+
+#define NODE_LIST(V) \
+ V(Checkpoint) \
+ V(CheckMaps) \
+ V(GapMove) \
+ V(SoftDeopt) \
+ V(StoreField) \
+ V(StoreToFrame) \
+ VALUE_NODE_LIST(V)
+
+#define CONDITIONAL_CONTROL_NODE_LIST(V) \
+ V(BranchIfTrue) \
+ V(BranchIfToBooleanTrue)
+
+#define UNCONDITIONAL_CONTROL_NODE_LIST(V) \
+ V(Jump) \
+ V(JumpLoop)
+
+#define CONTROL_NODE_LIST(V) \
+ V(Return) \
+ CONDITIONAL_CONTROL_NODE_LIST(V) \
+ UNCONDITIONAL_CONTROL_NODE_LIST(V)
+
+#define NODE_BASE_LIST(V) \
+ NODE_LIST(V) \
+ CONTROL_NODE_LIST(V)
+
+// Define the opcode enum.
+#define DEF_OPCODES(type) k##type,
+enum class Opcode : uint8_t { NODE_BASE_LIST(DEF_OPCODES) };
+#undef DEF_OPCODES
+#define PLUS_ONE(type) +1
+static constexpr int kOpcodeCount = NODE_BASE_LIST(PLUS_ONE);
+static constexpr Opcode kFirstOpcode = static_cast<Opcode>(0);
+static constexpr Opcode kLastOpcode = static_cast<Opcode>(kOpcodeCount - 1);
+#undef PLUS_ONE
+
+const char* ToString(Opcode opcode);
+inline std::ostream& operator<<(std::ostream& os, Opcode opcode) {
+ return os << ToString(opcode);
+}
+
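+// The opcode range constants below assume that each sub-list of
+// NODE_BASE_LIST defines a contiguous run of opcodes, which holds because the
+// lists above are concatenated in order. The extra kLastOpcode/kFirstOpcode
+// entries terminate the comma-trailing macro expansions without affecting the
+// min/max result.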
+#define V(Name) Opcode::k##Name,
+static constexpr Opcode kFirstValueNodeOpcode =
+ std::min({VALUE_NODE_LIST(V) kLastOpcode});
+static constexpr Opcode kLastValueNodeOpcode =
+ std::max({VALUE_NODE_LIST(V) kFirstOpcode});
+
+static constexpr Opcode kFirstNodeOpcode = std::min({NODE_LIST(V) kLastOpcode});
+static constexpr Opcode kLastNodeOpcode = std::max({NODE_LIST(V) kFirstOpcode});
+
+static constexpr Opcode kFirstConditionalControlNodeOpcode =
+ std::min({CONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode});
+static constexpr Opcode kLastConditionalControlNodeOpcode =
+ std::max({CONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode});
+
+static constexpr Opcode kLastUnconditionalControlNodeOpcode =
+ std::max({UNCONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode});
+static constexpr Opcode kFirstUnconditionalControlNodeOpcode =
+ std::min({UNCONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode});
+
+static constexpr Opcode kFirstControlNodeOpcode =
+ std::min({CONTROL_NODE_LIST(V) kLastOpcode});
+static constexpr Opcode kLastControlNodeOpcode =
+ std::max({CONTROL_NODE_LIST(V) kFirstOpcode});
+#undef V
+
+constexpr bool IsValueNode(Opcode opcode) {
+ return kFirstValueNodeOpcode <= opcode && opcode <= kLastValueNodeOpcode;
+}
+constexpr bool IsConditionalControlNode(Opcode opcode) {
+ return kFirstConditionalControlNodeOpcode <= opcode &&
+ opcode <= kLastConditionalControlNodeOpcode;
+}
+constexpr bool IsUnconditionalControlNode(Opcode opcode) {
+ return kFirstUnconditionalControlNodeOpcode <= opcode &&
+ opcode <= kLastUnconditionalControlNodeOpcode;
+}
+
+// Forward-declare NodeBase sub-hierarchies.
+class Node;
+class ControlNode;
+class ConditionalControlNode;
+class UnconditionalControlNode;
+class ValueNode;
+
+#define DEF_FORWARD_DECLARATION(type, ...) class type;
+NODE_BASE_LIST(DEF_FORWARD_DECLARATION)
+#undef DEF_FORWARD_DECLARATION
+
+using NodeIdT = uint32_t;
+static constexpr uint32_t kInvalidNodeId = 0;
+
+class OpProperties {
+ public:
+ bool is_call() const { return kIsCallBit::decode(bitfield_); }
+ bool can_deopt() const { return kCanDeoptBit::decode(bitfield_); }
+ bool can_read() const { return kCanReadBit::decode(bitfield_); }
+ bool can_write() const { return kCanWriteBit::decode(bitfield_); }
+ bool non_memory_side_effects() const {
+ return kNonMemorySideEffectsBit::decode(bitfield_);
+ }
+
+ bool is_pure() const { return (bitfield_ & kPureMask) == kPureValue; }
+ bool is_required_when_unused() const {
+ return can_write() || non_memory_side_effects();
+ }
+
+ constexpr OpProperties operator|(const OpProperties& that) {
+ return OpProperties(bitfield_ | that.bitfield_);
+ }
+
+ static constexpr OpProperties Pure() { return OpProperties(kPureValue); }
+ static constexpr OpProperties Call() {
+ return OpProperties(kIsCallBit::encode(true));
+ }
+ static constexpr OpProperties Deopt() {
+ return OpProperties(kCanDeoptBit::encode(true));
+ }
+ static constexpr OpProperties Reading() {
+ return OpProperties(kCanReadBit::encode(true));
+ }
+ static constexpr OpProperties Writing() {
+ return OpProperties(kCanWriteBit::encode(true));
+ }
+ static constexpr OpProperties NonMemorySideEffects() {
+ return OpProperties(kNonMemorySideEffectsBit::encode(true));
+ }
+ static constexpr OpProperties AnySideEffects() {
+ return Reading() | Writing() | NonMemorySideEffects();
+ }
+
+ private:
+ using kIsCallBit = base::BitField<bool, 0, 1>;
+ using kCanDeoptBit = kIsCallBit::Next<bool, 1>;
+ using kCanReadBit = kCanDeoptBit::Next<bool, 1>;
+ using kCanWriteBit = kCanReadBit::Next<bool, 1>;
+ using kNonMemorySideEffectsBit = kCanWriteBit::Next<bool, 1>;
+
+ static const uint32_t kPureMask = kCanReadBit::kMask | kCanWriteBit::kMask |
+ kNonMemorySideEffectsBit::kMask;
+ static const uint32_t kPureValue = kCanReadBit::encode(false) |
+ kCanWriteBit::encode(false) |
+ kNonMemorySideEffectsBit::encode(false);
+
+ constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {}
+
+ uint32_t bitfield_;
+};
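+// Node classes declare their properties by combining these, e.g. CheckMaps
+// below uses OpProperties::Deopt() | OpProperties::Call().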
+
+class ValueLocation {
+ public:
+ ValueLocation() = default;
+
+ template <typename... Args>
+ void SetUnallocated(Args&&... args) {
+ DCHECK(operand_.IsInvalid());
+ operand_ = compiler::UnallocatedOperand(args...);
+ }
+
+ template <typename... Args>
+ void SetAllocated(Args&&... args) {
+ DCHECK(operand_.IsUnallocated());
+ operand_ = compiler::AllocatedOperand(args...);
+ }
+
+ // Only to be used on inputs that inherit allocation.
+ template <typename... Args>
+ void InjectAllocated(Args&&... args) {
+ operand_ = compiler::AllocatedOperand(args...);
+ }
+
+ template <typename... Args>
+ void SetConstant(Args&&... args) {
+ DCHECK(operand_.IsUnallocated());
+ operand_ = compiler::ConstantOperand(args...);
+ }
+
+ Register AssignedRegister() const {
+ return Register::from_code(
+ compiler::AllocatedOperand::cast(operand_).register_code());
+ }
+
+ const compiler::InstructionOperand& operand() const { return operand_; }
+ const compiler::InstructionOperand& operand() { return operand_; }
+
+ private:
+ compiler::InstructionOperand operand_;
+};
+
+class Input : public ValueLocation {
+ public:
+ explicit Input(ValueNode* node) : node_(node) {}
+
+ ValueNode* node() const { return node_; }
+
+ NodeIdT next_use_id() const { return next_use_id_; }
+
+ // Used in ValueNode::mark_use
+ NodeIdT* get_next_use_id_address() { return &next_use_id_; }
+
+ private:
+ ValueNode* node_;
+ NodeIdT next_use_id_ = kInvalidNodeId;
+};
+
+// Dummy type for the initial raw allocation.
+struct NodeWithInlineInputs {};
+
+namespace detail {
+// Helper for getting the static opcode of a Node subclass. This is in a
+// "detail" namespace rather than in NodeBase because we can't template
+// specialize outside of namespace scopes before C++17.
+template <class T>
+struct opcode_of_helper;
+
+#define DEF_OPCODE_OF(Name) \
+ template <> \
+ struct opcode_of_helper<Name> { \
+ static constexpr Opcode value = Opcode::k##Name; \
+ };
+NODE_BASE_LIST(DEF_OPCODE_OF)
+#undef DEF_OPCODE_OF
+} // namespace detail
+
+class NodeBase : public ZoneObject {
+ protected:
+ template <class T>
+ static constexpr Opcode opcode_of = detail::opcode_of_helper<T>::value;
+
+ public:
+ template <class Derived, typename... Args>
+ static Derived* New(Zone* zone, std::initializer_list<ValueNode*> inputs,
+ Args&&... args) {
+ Derived* node =
+ Allocate<Derived>(zone, inputs.size(), std::forward<Args>(args)...);
+
+ int i = 0;
+ for (ValueNode* input : inputs) {
+ DCHECK_NOT_NULL(input);
+ node->set_input(i++, input);
+ }
+
+ return node;
+ }
+
+ // Inputs must be initialized manually.
+ template <class Derived, typename... Args>
+ static Derived* New(Zone* zone, size_t input_count, Args&&... args) {
+ Derived* node =
+ Allocate<Derived>(zone, input_count, std::forward<Args>(args)...);
+ return node;
+ }
+
+ // Overwritten by subclasses.
+ static constexpr OpProperties kProperties = OpProperties::Pure();
+ inline const OpProperties& properties() const;
+
+ constexpr Opcode opcode() const { return OpcodeField::decode(bit_field_); }
+
+ template <class T>
+ constexpr bool Is() const;
+
+ template <class T>
+ constexpr T* Cast() {
+ DCHECK(Is<T>());
+ return static_cast<T*>(this);
+ }
+ template <class T>
+ constexpr const T* Cast() const {
+ DCHECK(Is<T>());
+ return static_cast<const T*>(this);
+ }
+ template <class T>
+ constexpr T* TryCast() {
+ return Is<T>() ? static_cast<T*>(this) : nullptr;
+ }
+
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const {
+ return InputCountField::decode(bit_field_);
+ }
+
+ Input& input(int index) { return *input_address(index); }
+ const Input& input(int index) const { return *input_address(index); }
+
+ // Input iterators, use like:
+ //
+ // for (Input& input : *node) { ... }
+ auto begin() { return std::make_reverse_iterator(input_address(-1)); }
+ auto end() {
+ return std::make_reverse_iterator(input_address(input_count() - 1));
+ }
+
+ constexpr NodeIdT id() const {
+ DCHECK_NE(id_, kInvalidNodeId);
+ return id_;
+ }
+ void set_id(NodeIdT id) {
+ DCHECK_EQ(id_, kInvalidNodeId);
+ DCHECK_NE(id, kInvalidNodeId);
+ id_ = id;
+ }
+
+ int num_temporaries_needed() const {
+#ifdef DEBUG
+ if (kTemporariesState == kUnset) {
+ DCHECK_EQ(num_temporaries_needed_, 0);
+ } else {
+ DCHECK_EQ(kTemporariesState, kNeedsTemporaries);
+ }
+#endif // DEBUG
+ return num_temporaries_needed_;
+ }
+
+ RegList temporaries() const {
+ DCHECK_EQ(kTemporariesState, kHasTemporaries);
+ return temporaries_;
+ }
+
+ void assign_temporaries(RegList list) {
+#ifdef DEBUG
+ if (kTemporariesState == kUnset) {
+ DCHECK_EQ(num_temporaries_needed_, 0);
+ } else {
+ DCHECK_EQ(kTemporariesState, kNeedsTemporaries);
+ }
+ kTemporariesState = kHasTemporaries;
+#endif // DEBUG
+ temporaries_ = list;
+ }
+
+ void Print(std::ostream& os, MaglevGraphLabeller*) const;
+
+ protected:
+ NodeBase(Opcode opcode, size_t input_count)
+ : bit_field_(OpcodeField::encode(opcode) |
+ InputCountField::encode(input_count)) {}
+
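+ // Input layout: Allocate() below places the inputs in a buffer immediately
+ // *before* the node object, in reverse order:
+ //
+ //   [ Input(n-1) ... Input(1) Input(0) ][ node fields ]
+ //                                       ^ this
+ //
+ // so input_address(i) indexes backwards from `this`, and begin()/end() walk
+ // the inputs via reverse iterators.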
+ Input* input_address(int index) {
+ DCHECK_LT(index, input_count());
+ return reinterpret_cast<Input*>(this) - (index + 1);
+ }
+ const Input* input_address(int index) const {
+ DCHECK_LT(index, input_count());
+ return reinterpret_cast<const Input*>(this) - (index + 1);
+ }
+
+ void set_input(int index, ValueNode* input) {
+ new (input_address(index)) Input(input);
+ }
+
+ private:
+ template <class Derived, typename... Args>
+ static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) {
+ const size_t size = sizeof(Derived) + input_count * sizeof(Input);
+ intptr_t raw_buffer =
+ reinterpret_cast<intptr_t>(zone->Allocate<NodeWithInlineInputs>(size));
+ void* node_buffer =
+ reinterpret_cast<void*>(raw_buffer + input_count * sizeof(Input));
+ Derived* node =
+ new (node_buffer) Derived(input_count, std::forward<Args>(args)...);
+ return node;
+ }
+
+ protected:
+ // Bitfield specification.
+ using OpcodeField = base::BitField<Opcode, 0, 6>;
+ STATIC_ASSERT(OpcodeField::is_valid(kLastOpcode));
+ using InputCountField = OpcodeField::Next<uint16_t, 16>;
+ // Subclasses may use the remaining bits.
+ template <class T, int size>
+ using NextBitField = InputCountField::Next<T, size>;
+
+ void set_temporaries_needed(int value) {
+#ifdef DEBUG
+ DCHECK_EQ(kTemporariesState, kUnset);
+ kTemporariesState = kNeedsTemporaries;
+#endif // DEBUG
+ num_temporaries_needed_ = value;
+ }
+
+ uint32_t bit_field_;
+
+ private:
+ NodeIdT id_ = kInvalidNodeId;
+
+ union {
+ int num_temporaries_needed_ = 0;
+ RegList temporaries_;
+ };
+#ifdef DEBUG
+ enum {
+ kUnset,
+ kNeedsTemporaries,
+ kHasTemporaries
+ } kTemporariesState = kUnset;
+#endif // DEBUG
+
+ NodeBase() = delete;
+ NodeBase(const NodeBase&) = delete;
+ NodeBase(NodeBase&&) = delete;
+ NodeBase& operator=(const NodeBase&) = delete;
+ NodeBase& operator=(NodeBase&&) = delete;
+};
+
+template <class T>
+constexpr bool NodeBase::Is() const {
+ return opcode() == opcode_of<T>;
+}
+
+// Specialized sub-hierarchy type checks.
+template <>
+constexpr bool NodeBase::Is<ValueNode>() const {
+ return IsValueNode(opcode());
+}
+template <>
+constexpr bool NodeBase::Is<ConditionalControlNode>() const {
+ return IsConditionalControlNode(opcode());
+}
+template <>
+constexpr bool NodeBase::Is<UnconditionalControlNode>() const {
+ return IsUnconditionalControlNode(opcode());
+}
+
+// The Node class hierarchy contains all non-control nodes.
+class Node : public NodeBase {
+ public:
+ using List = base::ThreadedList<Node>;
+
+ inline ValueLocation& result();
+
+ protected:
+ explicit Node(Opcode opcode, size_t input_count)
+ : NodeBase(opcode, input_count) {}
+
+ private:
+ Node** next() { return &next_; }
+ Node* next_ = nullptr;
+ friend List;
+ friend base::ThreadedListTraits<Node>;
+};
+
+// All non-control nodes with a result.
+class ValueNode : public Node {
+ public:
+ ValueLocation& result() { return result_; }
+ const ValueLocation& result() const { return result_; }
+
+ const compiler::InstructionOperand& hint() const {
+ DCHECK_EQ(state_, kSpillOrHint);
+ DCHECK(spill_or_hint_.IsInvalid() || spill_or_hint_.IsUnallocated());
+ return spill_or_hint_;
+ }
+
+ void SetNoSpillOrHint() {
+ DCHECK_EQ(state_, kLastUse);
+#ifdef DEBUG
+ state_ = kSpillOrHint;
+#endif // DEBUG
+ spill_or_hint_ = compiler::InstructionOperand();
+ }
+
+ bool is_spilled() const {
+ DCHECK_EQ(state_, kSpillOrHint);
+ return spill_or_hint_.IsStackSlot();
+ }
+
+ void Spill(compiler::AllocatedOperand operand) {
+#ifdef DEBUG
+ if (state_ == kLastUse) {
+ state_ = kSpillOrHint;
+ } else {
+ DCHECK(!is_spilled());
+ }
+#endif // DEBUG
+ DCHECK(operand.IsAnyStackSlot());
+ spill_or_hint_ = operand;
+ }
+
+ compiler::AllocatedOperand spill_slot() const {
+ DCHECK_EQ(state_, kSpillOrHint);
+ DCHECK(is_spilled());
+ return compiler::AllocatedOperand::cast(spill_or_hint_);
+ }
+
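+ // Uses of this node are threaded into a singly-linked list: the id of the
+ // first use is stored in next_use_, and each Input's next_use_id_ field
+ // holds the id of the following use. mark_use() appends to that list and
+ // bumps end_id_, which tracks the end of the live range.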
+ void mark_use(NodeIdT id, Input* use) {
+ DCHECK_EQ(state_, kLastUse);
+ DCHECK_NE(id, kInvalidNodeId);
+ DCHECK_LT(start_id(), id);
+ DCHECK_IMPLIES(has_valid_live_range(), id >= end_id_);
+ end_id_ = id;
+ *last_uses_next_use_id_ = id;
+ if (use) {
+ last_uses_next_use_id_ = use->get_next_use_id_address();
+ }
+ }
+
+ struct LiveRange {
+ NodeIdT start = kInvalidNodeId;
+ NodeIdT end = kInvalidNodeId;
+ };
+
+ bool has_valid_live_range() const { return end_id_ != 0; }
+ LiveRange live_range() const { return {start_id(), end_id_}; }
+ NodeIdT next_use() const { return next_use_; }
+
+ // The following methods should only be used during register allocation, to
+ // mark the _current_ state of this Node according to the register allocator.
+ void set_next_use(NodeIdT use) { next_use_ = use; }
+
+ // A node is dead once it has no more upcoming uses.
+ bool is_dead() const { return next_use_ == kInvalidNodeId; }
+
+ void AddRegister(Register reg) { registers_with_result_.set(reg); }
+ void RemoveRegister(Register reg) { registers_with_result_.clear(reg); }
+ RegList ClearRegisters() {
+ return std::exchange(registers_with_result_, kEmptyRegList);
+ }
+
+ int num_registers() const { return registers_with_result_.Count(); }
+ bool has_register() const { return registers_with_result_ != kEmptyRegList; }
+
+ compiler::AllocatedOperand allocation() const {
+ if (has_register()) {
+ return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged,
+ registers_with_result_.first().code());
+ }
+ DCHECK(is_spilled());
+ return compiler::AllocatedOperand::cast(spill_or_hint_);
+ }
+
+ protected:
+ explicit ValueNode(Opcode opcode, size_t input_count)
+ : Node(opcode, input_count),
+ last_uses_next_use_id_(&next_use_)
+#ifdef DEBUG
+ ,
+ state_(kLastUse)
+#endif // DEBUG
+ {
+ }
+
+ // Rename for better pairing with `end_id`.
+ NodeIdT start_id() const { return id(); }
+
+ NodeIdT end_id_ = kInvalidNodeId;
+ NodeIdT next_use_ = kInvalidNodeId;
+ ValueLocation result_;
+ RegList registers_with_result_ = kEmptyRegList;
+ union {
+ // Pointer to the current last use's next_use_id field. Most of the time
+ // this will be a pointer to an Input's next_use_id_ field, but it's
+ // initialized to this node's next_use_ to track the first use.
+ NodeIdT* last_uses_next_use_id_;
+ compiler::InstructionOperand spill_or_hint_;
+ };
+#ifdef DEBUG
+ // TODO(leszeks): Consider splitting into kSpill and kHint.
+ enum { kLastUse, kSpillOrHint } state_;
+#endif // DEBUG
+};
+
+ValueLocation& Node::result() {
+ DCHECK(Is<ValueNode>());
+ return Cast<ValueNode>()->result();
+}
+
+template <class Derived>
+class NodeT : public Node {
+ STATIC_ASSERT(!IsValueNode(opcode_of<Derived>));
+
+ public:
+ constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ const OpProperties& properties() const { return Derived::kProperties; }
+
+ protected:
+ explicit NodeT(size_t input_count) : Node(opcode_of<Derived>, input_count) {}
+};
+
+template <size_t InputCount, class Derived>
+class FixedInputNodeT : public NodeT<Derived> {
+ static constexpr size_t kInputCount = InputCount;
+
+ public:
+ // Shadowing for static knowledge.
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const { return kInputCount; }
+ auto end() {
+ return std::make_reverse_iterator(this->input_address(input_count() - 1));
+ }
+
+ protected:
+ explicit FixedInputNodeT(size_t input_count) : NodeT<Derived>(kInputCount) {
+ DCHECK_EQ(input_count, kInputCount);
+ USE(input_count);
+ }
+};
+
+template <class Derived>
+class ValueNodeT : public ValueNode {
+ STATIC_ASSERT(IsValueNode(opcode_of<Derived>));
+
+ public:
+ constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ const OpProperties& properties() const { return Derived::kProperties; }
+
+ protected:
+ explicit ValueNodeT(size_t input_count)
+ : ValueNode(opcode_of<Derived>, input_count) {}
+};
+
+template <size_t InputCount, class Derived>
+class FixedInputValueNodeT : public ValueNodeT<Derived> {
+ static constexpr size_t kInputCount = InputCount;
+
+ public:
+ // Shadowing for static knowledge.
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const { return kInputCount; }
+ auto end() {
+ return std::make_reverse_iterator(this->input_address(input_count() - 1));
+ }
+
+ protected:
+ explicit FixedInputValueNodeT(size_t input_count)
+ : ValueNodeT<Derived>(InputCount) {
+ DCHECK_EQ(input_count, InputCount);
+ USE(input_count);
+ }
+};
+
+template <class Derived, Operation kOperation>
+class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
+ using Base = FixedInputValueNodeT<1, Derived>;
+
+ public:
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ static constexpr int kOperandIndex = 0;
+ Input& operand_input() { return Node::input(kOperandIndex); }
+ compiler::FeedbackSource feedback() const { return feedback_; }
+
+ protected:
+ explicit UnaryWithFeedbackNode(size_t input_count,
+ const compiler::FeedbackSource& feedback)
+ : Base(input_count), feedback_(feedback) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ const compiler::FeedbackSource feedback_;
+};
+
+template <class Derived, Operation kOperation>
+class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
+ using Base = FixedInputValueNodeT<2, Derived>;
+
+ public:
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ static constexpr int kLeftIndex = 0;
+ static constexpr int kRightIndex = 1;
+ Input& left_input() { return Node::input(kLeftIndex); }
+ Input& right_input() { return Node::input(kRightIndex); }
+ compiler::FeedbackSource feedback() const { return feedback_; }
+
+ protected:
+ BinaryWithFeedbackNode(size_t input_count,
+ const compiler::FeedbackSource& feedback)
+ : Base(input_count), feedback_(feedback) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ const compiler::FeedbackSource feedback_;
+};
+
+#define DEF_OPERATION_NODE(Name, Super, OpName) \
+ class Name : public Super<Name, Operation::k##OpName> { \
+ using Base = Super<Name, Operation::k##OpName>; \
+ \
+ public: \
+ Name(size_t input_count, const compiler::FeedbackSource& feedback) \
+ : Base(input_count, feedback) {} \
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&); \
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
+ };
+
+#define DEF_UNARY_WITH_FEEDBACK_NODE(Name) \
+ DEF_OPERATION_NODE(Generic##Name, UnaryWithFeedbackNode, Name)
+#define DEF_BINARY_WITH_FEEDBACK_NODE(Name) \
+ DEF_OPERATION_NODE(Generic##Name, BinaryWithFeedbackNode, Name)
+UNARY_OPERATION_LIST(DEF_UNARY_WITH_FEEDBACK_NODE)
+ARITHMETIC_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
+COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
+#undef DEF_UNARY_WITH_FEEDBACK_NODE
+#undef DEF_BINARY_WITH_FEEDBACK_NODE
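+// For example, DEF_BINARY_WITH_FEEDBACK_NODE(Add) expands to a GenericAdd
+// class deriving from BinaryWithFeedbackNode<GenericAdd, Operation::kAdd>,
+// with the usual AllocateVreg/GenerateCode/PrintParams hooks declared.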
+
+class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
+ using Base = FixedInputValueNodeT<0, InitialValue>;
+
+ public:
+ explicit InitialValue(size_t input_count, interpreter::Register source)
+ : Base(input_count), source_(source) {}
+
+ interpreter::Register source() const { return source_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const interpreter::Register source_;
+};
+
+class RegisterInput : public FixedInputValueNodeT<0, RegisterInput> {
+ using Base = FixedInputValueNodeT<0, RegisterInput>;
+
+ public:
+ explicit RegisterInput(size_t input_count, Register input)
+ : Base(input_count), input_(input) {}
+
+ Register input() const { return input_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const Register input_;
+};
+
+class SmiConstant : public FixedInputValueNodeT<0, SmiConstant> {
+ using Base = FixedInputValueNodeT<0, SmiConstant>;
+
+ public:
+ explicit SmiConstant(size_t input_count, Smi value)
+ : Base(input_count), value_(value) {}
+
+ Smi value() const { return value_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const Smi value_;
+};
+
+class Constant : public FixedInputValueNodeT<0, Constant> {
+ using Base = FixedInputValueNodeT<0, Constant>;
+
+ public:
+ explicit Constant(size_t input_count, const compiler::HeapObjectRef& object)
+ : Base(input_count), object_(object) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::HeapObjectRef object_;
+};
+
+class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
+ using Base = FixedInputValueNodeT<0, RootConstant>;
+
+ public:
+ explicit RootConstant(size_t input_count, RootIndex index)
+ : Base(input_count), index_(index) {}
+
+ RootIndex index() const { return index_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const RootIndex index_;
+};
+
+class Checkpoint : public FixedInputNodeT<0, Checkpoint> {
+ using Base = FixedInputNodeT<0, Checkpoint>;
+
+ public:
+ explicit Checkpoint(size_t input_count, int bytecode_position,
+ bool accumulator_is_live, ValueNode* accumulator)
+ : Base(input_count),
+ bytecode_position_(bytecode_position),
+ accumulator_(accumulator_is_live ? accumulator : nullptr) {}
+
+ int bytecode_position() const { return bytecode_position_; }
+ bool is_used() const { return IsUsedBit::decode(bit_field_); }
+ void SetUsed() { bit_field_ = IsUsedBit::update(bit_field_, true); }
+ ValueNode* accumulator() const { return accumulator_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ using IsUsedBit = NextBitField<bool, 1>;
+
+ const int bytecode_position_;
+ ValueNode* const accumulator_;
+};
+
+class SoftDeopt : public FixedInputNodeT<0, SoftDeopt> {
+ using Base = FixedInputNodeT<0, SoftDeopt>;
+
+ public:
+ explicit SoftDeopt(size_t input_count) : Base(input_count) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Deopt();
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
+ using Base = FixedInputNodeT<1, CheckMaps>;
+
+ public:
+ explicit CheckMaps(size_t input_count, const compiler::MapRef& map)
+ : Base(input_count), map_(map) {}
+
+ // TODO(verwaest): This just calls in deferred code, so probably we'll need to
+ // mark that to generate stack maps. Mark as call so we at least clear the
+ // registers since we currently don't properly spill either.
+ static constexpr OpProperties kProperties =
+ OpProperties::Deopt() | OpProperties::Call();
+
+ compiler::MapRef map() const { return map_; }
+
+ static constexpr int kActualMapIndex = 0;
+ Input& actual_map_input() { return input(kActualMapIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::MapRef map_;
+};
+
+class LoadField : public FixedInputValueNodeT<1, LoadField> {
+ using Base = FixedInputValueNodeT<1, LoadField>;
+
+ public:
+ explicit LoadField(size_t input_count, int handler)
+ : Base(input_count), handler_(handler) {}
+
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ int handler() const { return handler_; }
+
+ static constexpr int kObjectIndex = 0;
+ Input& object_input() { return input(kObjectIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const int handler_;
+};
+
+class StoreField : public FixedInputNodeT<2, StoreField> {
+ using Base = FixedInputNodeT<2, StoreField>;
+
+ public:
+ explicit StoreField(size_t input_count, int handler)
+ : Base(input_count), handler_(handler) {}
+
+ int handler() const { return handler_; }
+
+ static constexpr int kObjectIndex = 0;
+ static constexpr int kValueIndex = 1;
+ Input& object_input() { return input(kObjectIndex); }
+ Input& value_input() { return input(kValueIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const int handler_;
+};
+
+class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> {
+ using Base = FixedInputValueNodeT<1, LoadGlobal>;
+
+ public:
+ explicit LoadGlobal(size_t input_count, const compiler::NameRef& name)
+ : Base(input_count), name_(name) {}
+
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ Input& context() { return input(0); }
+ const compiler::NameRef& name() const { return name_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::NameRef name_;
+};
+
+class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
+ using Base = FixedInputValueNodeT<2, LoadNamedGeneric>;
+
+ public:
+ explicit LoadNamedGeneric(size_t input_count, const compiler::NameRef& name)
+ : Base(input_count), name_(name) {}
+
+ // The implementation currently calls runtime.
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ compiler::NameRef name() const { return name_; }
+
+ static constexpr int kContextIndex = 0;
+ static constexpr int kObjectIndex = 1;
+ Input& context() { return input(kContextIndex); }
+ Input& object_input() { return input(kObjectIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const compiler::NameRef name_;
+};
+
+class StoreToFrame : public FixedInputNodeT<0, StoreToFrame> {
+ using Base = FixedInputNodeT<0, StoreToFrame>;
+
+ public:
+ StoreToFrame(size_t input_count, ValueNode* value,
+ interpreter::Register target)
+ : Base(input_count), value_(value), target_(target) {}
+
+ interpreter::Register target() const { return target_; }
+ ValueNode* value() const { return value_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ ValueNode* const value_;
+ const interpreter::Register target_;
+};
+
+class GapMove : public FixedInputNodeT<0, GapMove> {
+ using Base = FixedInputNodeT<0, GapMove>;
+
+ public:
+ GapMove(size_t input_count, compiler::AllocatedOperand source,
+ compiler::AllocatedOperand target)
+ : Base(input_count), source_(source), target_(target) {}
+
+ compiler::AllocatedOperand source() const { return source_; }
+ compiler::AllocatedOperand target() const { return target_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ compiler::AllocatedOperand source_;
+ compiler::AllocatedOperand target_;
+};
+
+// TODO(verwaest): It may make more sense to buffer phis in merged_states until
+// we set up the interpreter frame state for code generation. At that point we
+// can generate correctly-sized phis.
+class Phi : public ValueNodeT<Phi> {
+ using Base = ValueNodeT<Phi>;
+
+ public:
+ using List = base::ThreadedList<Phi>;
+
+ // TODO(jgruber): More intuitive constructors, if possible.
+ Phi(size_t input_count, interpreter::Register owner, int merge_offset)
+ : Base(input_count), owner_(owner), merge_offset_(merge_offset) {}
+
+ interpreter::Register owner() const { return owner_; }
+ int merge_offset() const { return merge_offset_; }
+
+ using Node::set_input;
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void AllocateVregInPostProcess(MaglevVregAllocationState*);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ Phi** next() { return &next_; }
+
+ const interpreter::Register owner_;
+ Phi* next_ = nullptr;
+ const int merge_offset_;
+ friend List;
+ friend base::ThreadedListTraits<Phi>;
+};
+
+class CallProperty : public ValueNodeT<CallProperty> {
+ using Base = ValueNodeT<CallProperty>;
+
+ public:
+ explicit CallProperty(size_t input_count) : Base(input_count) {}
+
+ // This ctor is used for variable input counts.
+ // Inputs must be initialized manually.
+ CallProperty(size_t input_count, ValueNode* function, ValueNode* context)
+ : Base(input_count) {
+ set_input(0, function);
+ set_input(1, context);
+ }
+
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ Input& function() { return input(0); }
+ const Input& function() const { return input(0); }
+ Input& context() { return input(1); }
+ const Input& context() const { return input(1); }
+ int num_args() const { return input_count() - 2; }
+ Input& arg(int i) { return input(i + 2); }
+ void set_arg(int i, ValueNode* node) { set_input(i + 2, node); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CallUndefinedReceiver : public ValueNodeT<CallUndefinedReceiver> {
+ using Base = ValueNodeT<CallUndefinedReceiver>;
+
+ public:
+ explicit CallUndefinedReceiver(size_t input_count) : Base(input_count) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ Input& function() { return input(0); }
+ const Input& function() const { return input(0); }
+ Input& context() { return input(1); }
+ const Input& context() const { return input(1); }
+ int num_args() const { return input_count() - 2; }
+ Input& arg(int i) { return input(i + 2); }
+ void set_arg(int i, ValueNode* node) { set_input(i + 2, node); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+// Represents either a direct BasicBlock pointer, or an entry in a list of
+// unresolved BasicBlockRefs which will be mutated (in place) at some point into
+// direct BasicBlock pointers.
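+//
+// Unresolved refs are threaded onto a ref list via MoveToRefList() and later
+// patched, one by one, to the real block with SetToBlockAndReturnNext().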
+class BasicBlockRef {
+ struct BasicBlockRefBuilder;
+
+ public:
+ BasicBlockRef() : next_ref_(nullptr) {
+#ifdef DEBUG
+ state_ = kRefList;
+#endif
+ }
+ explicit BasicBlockRef(BasicBlock* block) : block_ptr_(block) {
+#ifdef DEBUG
+ state_ = kBlockPointer;
+#endif
+ }
+
+ // Refs can't be copied or moved, since they are referenced by `this` pointer
+ // in the ref list.
+ BasicBlockRef(const BasicBlockRef&) = delete;
+ BasicBlockRef(BasicBlockRef&&) = delete;
+ BasicBlockRef& operator=(const BasicBlockRef&) = delete;
+ BasicBlockRef& operator=(BasicBlockRef&&) = delete;
+
+ // Construct a new ref-list mode BasicBlockRef and add it to the given ref
+ // list.
+ explicit BasicBlockRef(BasicBlockRef* ref_list_head) : BasicBlockRef() {
+ BasicBlockRef* old_next_ptr = MoveToRefList(ref_list_head);
+ USE(old_next_ptr);
+ DCHECK_NULL(old_next_ptr);
+ }
+
+ // Change this ref to a direct basic block pointer, returning the old "next"
+ // pointer of the current ref.
+ BasicBlockRef* SetToBlockAndReturnNext(BasicBlock* block) {
+ DCHECK_EQ(state_, kRefList);
+
+ BasicBlockRef* old_next_ptr = next_ref_;
+ block_ptr_ = block;
+#ifdef DEBUG
+ state_ = kBlockPointer;
+#endif
+ return old_next_ptr;
+ }
+
+ // Reset this ref list to null, returning the old ref list (i.e. the old
+ // "next" pointer).
+ BasicBlockRef* Reset() {
+ DCHECK_EQ(state_, kRefList);
+
+ BasicBlockRef* old_next_ptr = next_ref_;
+ next_ref_ = nullptr;
+ return old_next_ptr;
+ }
+
+ // Move this ref to the given ref list, returning the old "next" pointer of
+ // the current ref.
+ BasicBlockRef* MoveToRefList(BasicBlockRef* ref_list_head) {
+ DCHECK_EQ(state_, kRefList);
+ DCHECK_EQ(ref_list_head->state_, kRefList);
+
+ BasicBlockRef* old_next_ptr = next_ref_;
+ next_ref_ = ref_list_head->next_ref_;
+ ref_list_head->next_ref_ = this;
+ return old_next_ptr;
+ }
+
+ BasicBlock* block_ptr() const {
+ DCHECK_EQ(state_, kBlockPointer);
+ return block_ptr_;
+ }
+
+ BasicBlockRef* next_ref() const {
+ DCHECK_EQ(state_, kRefList);
+ return next_ref_;
+ }
+
+ bool has_ref() const {
+ DCHECK_EQ(state_, kRefList);
+ return next_ref_ != nullptr;
+ }
+
+ private:
+ union {
+ BasicBlock* block_ptr_;
+ BasicBlockRef* next_ref_;
+ };
+#ifdef DEBUG
+ enum { kBlockPointer, kRefList } state_;
+#endif // DEBUG
+};
+
+class ControlNode : public NodeBase {
+ public:
+ // A "hole" in control flow is a control node that unconditionally interrupts
+ // linear control flow (either by jumping or by exiting).
+ //
+ // A "post-dominating" hole is a hole that is guaranteed to be be reached in
+ // control flow after this node (i.e. it is a hole that is a post-dominator
+ // of this node).
+ ControlNode* next_post_dominating_hole() const {
+ return next_post_dominating_hole_;
+ }
+ void set_next_post_dominating_hole(ControlNode* node) {
+ DCHECK_IMPLIES(node != nullptr, node->Is<Jump>() || node->Is<Return>() ||
+ node->Is<JumpLoop>());
+ next_post_dominating_hole_ = node;
+ }
+
+ protected:
+ explicit ControlNode(Opcode opcode, size_t input_count)
+ : NodeBase(opcode, input_count) {}
+
+ private:
+ ControlNode* next_post_dominating_hole_ = nullptr;
+};
+
+class UnconditionalControlNode : public ControlNode {
+ public:
+ BasicBlock* target() const { return target_.block_ptr(); }
+ int predecessor_id() const { return predecessor_id_; }
+ void set_predecessor_id(int id) { predecessor_id_ = id; }
+
+ protected:
+ explicit UnconditionalControlNode(Opcode opcode, size_t input_count,
+ BasicBlockRef* target_refs)
+ : ControlNode(opcode, input_count), target_(target_refs) {}
+ explicit UnconditionalControlNode(Opcode opcode, size_t input_count,
+ BasicBlock* target)
+ : ControlNode(opcode, input_count), target_(target) {}
+
+ private:
+ const BasicBlockRef target_;
+ int predecessor_id_ = 0;
+};
+
+template <class Derived>
+class UnconditionalControlNodeT : public UnconditionalControlNode {
+ STATIC_ASSERT(IsUnconditionalControlNode(opcode_of<Derived>));
+ static constexpr size_t kInputCount = 0;
+
+ public:
+ // Shadowing for static knowledge.
+ constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const { return kInputCount; }
+ auto end() {
+ return std::make_reverse_iterator(input_address(input_count() - 1));
+ }
+
+ protected:
+ explicit UnconditionalControlNodeT(size_t input_count,
+ BasicBlockRef* target_refs)
+ : UnconditionalControlNode(opcode_of<Derived>, kInputCount, target_refs) {
+ DCHECK_EQ(input_count, kInputCount);
+ USE(input_count);
+ }
+ explicit UnconditionalControlNodeT(size_t input_count, BasicBlock* target)
+ : UnconditionalControlNode(opcode_of<Derived>, kInputCount, target) {
+ DCHECK_EQ(input_count, kInputCount);
+ USE(input_count);
+ }
+};
+
+class ConditionalControlNode : public ControlNode {
+ public:
+ ConditionalControlNode(Opcode opcode, size_t input_count,
+ BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : ControlNode(opcode, input_count),
+ if_true_(if_true_refs),
+ if_false_(if_false_refs) {}
+
+ BasicBlock* if_true() const { return if_true_.block_ptr(); }
+ BasicBlock* if_false() const { return if_false_.block_ptr(); }
+
+ private:
+ BasicBlockRef if_true_;
+ BasicBlockRef if_false_;
+};
+
+template <size_t InputCount, class Derived>
+class ConditionalControlNodeT : public ConditionalControlNode {
+ STATIC_ASSERT(IsConditionalControlNode(opcode_of<Derived>));
+ static constexpr size_t kInputCount = InputCount;
+
+ public:
+ // Shadowing for static knowledge.
+ constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ constexpr bool has_inputs() const { return input_count() > 0; }
+ constexpr uint16_t input_count() const { return kInputCount; }
+ auto end() {
+ return std::make_reverse_iterator(input_address(input_count() - 1));
+ }
+
+ protected:
+ explicit ConditionalControlNodeT(size_t input_count,
+ BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : ConditionalControlNode(opcode_of<Derived>, kInputCount, if_true_refs,
+ if_false_refs) {
+ DCHECK_EQ(input_count, kInputCount);
+ USE(input_count);
+ }
+};
+
+class Jump : public UnconditionalControlNodeT<Jump> {
+ using Base = UnconditionalControlNodeT<Jump>;
+
+ public:
+ explicit Jump(size_t input_count, BasicBlockRef* target_refs)
+ : Base(input_count, target_refs) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
+ using Base = UnconditionalControlNodeT<JumpLoop>;
+
+ public:
+ explicit JumpLoop(size_t input_count, BasicBlock* target)
+ : Base(input_count, target) {}
+
+ explicit JumpLoop(size_t input_count, BasicBlockRef* ref)
+ : Base(input_count, ref) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Return : public ControlNode {
+ public:
+ explicit Return(size_t input_count)
+ : ControlNode(opcode_of<Return>, input_count) {}
+
+ Input& value_input() { return input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class BranchIfTrue : public ConditionalControlNodeT<1, BranchIfTrue> {
+ using Base = ConditionalControlNodeT<1, BranchIfTrue>;
+
+ public:
+ explicit BranchIfTrue(size_t input_count, BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : Base(input_count, if_true_refs, if_false_refs) {}
+
+ Input& condition_input() { return input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class BranchIfToBooleanTrue
+ : public ConditionalControlNodeT<1, BranchIfToBooleanTrue> {
+ using Base = ConditionalControlNodeT<1, BranchIfToBooleanTrue>;
+
+ public:
+ explicit BranchIfToBooleanTrue(size_t input_count,
+ BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : Base(input_count, if_true_refs, if_false_refs) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Call();
+
+ Input& condition_input() { return input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class BranchIfCompare
+ : public ConditionalControlNodeT<2, BranchIfToBooleanTrue> {
+ using Base = ConditionalControlNodeT<2, BranchIfToBooleanTrue>;
+
+ public:
+ static constexpr int kLeftIndex = 0;
+ static constexpr int kRightIndex = 1;
+ Input& left_input() { return NodeBase::input(kLeftIndex); }
+ Input& right_input() { return NodeBase::input(kRightIndex); }
+
+ explicit BranchIfCompare(size_t input_count, Operation operation,
+ BasicBlockRef* if_true_refs,
+ BasicBlockRef* if_false_refs)
+ : Base(input_count, if_true_refs, if_false_refs), operation_(operation) {}
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+ Operation operation_;
+};
+
+const OpProperties& NodeBase::properties() const {
+ switch (opcode()) {
+#define V(Name) \
+ case Opcode::k##Name: \
+ return Name::kProperties;
+ NODE_BASE_LIST(V)
+#undef V
+ }
+ UNREACHABLE();
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_IR_H_
diff --git a/deps/v8/src/maglev/maglev-regalloc-data.h b/deps/v8/src/maglev/maglev-regalloc-data.h
new file mode 100644
index 0000000000..f46b701147
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-regalloc-data.h
@@ -0,0 +1,83 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
+#define V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
+
+#include "src/base/pointer-with-payload.h"
+#include "src/codegen/register.h"
+#include "src/compiler/backend/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class ValueNode;
+
+#define COUNT(V) +1
+static constexpr int kAllocatableGeneralRegisterCount =
+ ALLOCATABLE_GENERAL_REGISTERS(COUNT);
+#undef COUNT
+
+struct RegisterStateFlags {
+ // TODO(v8:7700): Use the good old Flags mechanism.
+ static constexpr int kIsMergeShift = 0;
+ static constexpr int kIsInitializedShift = 1;
+
+ const bool is_initialized = false;
+ const bool is_merge = false;
+
+ explicit constexpr operator uintptr_t() const {
+ return (is_initialized ? 1 << kIsInitializedShift : 0) |
+ (is_merge ? 1 << kIsMergeShift : 0);
+ }
+ constexpr explicit RegisterStateFlags(uintptr_t state)
+ : is_initialized((state & (1 << kIsInitializedShift)) != 0),
+ is_merge((state & (1 << kIsMergeShift)) != 0) {}
+ constexpr RegisterStateFlags(bool is_initialized, bool is_merge)
+ : is_initialized(is_initialized), is_merge(is_merge) {}
+};
+constexpr bool operator==(const RegisterStateFlags& left,
+ const RegisterStateFlags& right) {
+ return left.is_initialized == right.is_initialized &&
+ left.is_merge == right.is_merge;
+}
+
+typedef base::PointerWithPayload<void, RegisterStateFlags, 2> RegisterState;
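+// A RegisterState packs either a ValueNode* (is_merge == false) or a
+// RegisterMerge* (is_merge == true) together with the flag bits above;
+// LoadMergeState() below decodes the pointer according to those flags.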
+
+struct RegisterMerge {
+ compiler::AllocatedOperand* operands() {
+ return reinterpret_cast<compiler::AllocatedOperand*>(this + 1);
+ }
+ compiler::AllocatedOperand& operand(size_t i) { return operands()[i]; }
+
+ ValueNode* node;
+};
+
+inline bool LoadMergeState(RegisterState state, RegisterMerge** merge) {
+ DCHECK(state.GetPayload().is_initialized);
+ if (state.GetPayload().is_merge) {
+ *merge = static_cast<RegisterMerge*>(state.GetPointer());
+ return true;
+ }
+ *merge = nullptr;
+ return false;
+}
+
+inline bool LoadMergeState(RegisterState state, ValueNode** node,
+ RegisterMerge** merge) {
+ DCHECK(state.GetPayload().is_initialized);
+ if (LoadMergeState(state, merge)) {
+ *node = (*merge)->node;
+ return true;
+ }
+ *node = static_cast<ValueNode*>(state.GetPointer());
+ return false;
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
diff --git a/deps/v8/src/maglev/maglev-regalloc.cc b/deps/v8/src/maglev/maglev-regalloc.cc
new file mode 100644
index 0000000000..897f2a2d0e
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-regalloc.cc
@@ -0,0 +1,875 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev-regalloc.h"
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-graph-printer.h"
+#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc-data.h"
+
+namespace v8 {
+namespace internal {
+
+namespace maglev {
+
+namespace {
+
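+// Pre-built RegisterState payload flags: a register that holds a plain value
+// node vs. one that holds a pending RegisterMerge (see
+// maglev-regalloc-data.h).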
+constexpr RegisterStateFlags initialized_node{true, false};
+constexpr RegisterStateFlags initialized_merge{true, true};
+
+using BlockReverseIterator = std::vector<BasicBlock>::reverse_iterator;
+
+// A target is a fallthrough of a control node if its ID is the next ID
+// after the control node.
+//
+// TODO(leszeks): Consider using the block iterator instead.
+bool IsTargetOfNodeFallthrough(ControlNode* node, BasicBlock* target) {
+ return node->id() + 1 == target->first_id();
+}
+
+ControlNode* NearestPostDominatingHole(ControlNode* node) {
+ // Conditional control nodes don't cause holes themselves. So, the nearest
+ // post-dominating hole is the conditional control node's next post-dominating
+ // hole.
+ if (node->Is<ConditionalControlNode>()) {
+ return node->next_post_dominating_hole();
+ }
+
+ // If the node is a Jump, it may be a hole, but only if it is not a
+ // fallthrough (jump to the immediately next block). Otherwise, it will point
+ // to the nearest post-dominating hole in its own "next" field.
+ if (Jump* jump = node->TryCast<Jump>()) {
+ if (IsTargetOfNodeFallthrough(jump, jump->target())) {
+ return jump->next_post_dominating_hole();
+ }
+ }
+
+ return node;
+}
+
+bool IsLiveAtTarget(ValueNode* node, ControlNode* source, BasicBlock* target) {
+ DCHECK_NOT_NULL(node);
+
+ // TODO(leszeks): We shouldn't have any dead nodes passed into here.
+ if (node->is_dead()) return false;
+
+ // If we're looping, a value can only be live if it was live before the loop.
+ if (target->control_node()->id() <= source->id()) {
+ // Gap moves may already be inserted in the target, so skip over those.
+ return node->id() < target->FirstNonGapMoveId();
+ }
+ // TODO(verwaest): This should be true but isn't because we don't yet
+ // eliminate dead code.
+ // DCHECK_GT(node->next_use, source->id());
+ // TODO(verwaest): Since we don't support deopt yet we can only deal with
+ // direct branches. Add support for holes.
+ return node->live_range().end >= target->first_id();
+}
+
+} // namespace
+
+StraightForwardRegisterAllocator::StraightForwardRegisterAllocator(
+ MaglevCompilationUnit* compilation_unit, Graph* graph)
+ : compilation_unit_(compilation_unit) {
+ ComputePostDominatingHoles(graph);
+ AllocateRegisters(graph);
+ graph->set_stack_slots(top_of_stack_);
+}
+
+StraightForwardRegisterAllocator::~StraightForwardRegisterAllocator() = default;
+
+// Compute, for all forward control nodes (i.e. excluding Return and JumpLoop),
+// a tree of post-dominating control flow holes.
+//
+// Control flow which interrupts linear control flow fallthrough for basic
+// blocks is considered to introduce a control flow "hole".
+//
+// A──────┐ │
+// │ Jump │ │
+// └──┬───┘ │
+// { │ B──────┐ │
+// Control flow { │ │ Jump │ │ Linear control flow
+// hole after A { │ └─┬────┘ │
+// { ▼ ▼ Fallthrough │
+// C──────┐ │
+// │Return│ │
+// └──────┘ ▼
+//
+// It is interesting, for each such hole, to know what the next hole will be
+// that we will unconditionally reach on our way to an exit node. Such
+// subsequent holes are in "post-dominators" of the current block.
+//
+// As an example, consider the following CFG, with the annotated holes. The
+// post-dominating hole tree is the transitive closure of the post-dominator
+// tree, up to nodes which are holes (in this example, A, D, F and H).
+//
+// CFG Immediate Post-dominating
+// post-dominators holes
+// A──────┐
+// │ Jump │ A A
+// └──┬───┘ │ │
+// { │ B──────┐ │ │
+// Control flow { │ │ Jump │ │ B │ B
+// hole after A { │ └─┬────┘ │ │ │ │
+// { ▼ ▼ │ │ │ │
+// C──────┐ │ │ │ │
+// │Branch│ └►C◄┘ │ C │
+// └┬────┬┘ │ │ │ │
+// ▼ │ │ │ │ │
+// D──────┐│ │ │ │ │
+// │ Jump ││ D │ │ D │ │
+// └──┬───┘▼ │ │ │ │ │ │
+// { │ E──────┐ │ │ │ │ │ │
+// Control flow { │ │ Jump │ │ │ E │ │ │ E │
+// hole after D { │ └─┬────┘ │ │ │ │ │ │ │ │
+// { ▼ ▼ │ │ │ │ │ │ │ │
+// F──────┐ │ ▼ │ │ │ ▼ │ │
+// │ Jump │ └►F◄┘ └─┴►F◄┴─┘
+// └─────┬┘ │ │
+// { │ G──────┐ │ │
+// Control flow { │ │ Jump │ │ G │ G
+// hole after F { │ └─┬────┘ │ │ │ │
+// { ▼ ▼ │ │ │ │
+// H──────┐ ▼ │ ▼ │
+// │Return│ H◄┘ H◄┘
+// └──────┘
+//
+// Since we only care about forward control, loop jumps are treated the same as
+// returns -- they terminate the post-dominating hole chain.
+//
+void StraightForwardRegisterAllocator::ComputePostDominatingHoles(
+ Graph* graph) {
+ // For all blocks, find the list of jumps that jump over code unreachable from
+ // the block. Such a list of jumps terminates in return or jumploop.
+ for (BasicBlock* block : base::Reversed(*graph)) {
+ ControlNode* control = block->control_node();
+ if (auto node = control->TryCast<Jump>()) {
+ // If the current control node is a jump, prepend it to the list of jumps
+ // at the target.
+ control->set_next_post_dominating_hole(
+ NearestPostDominatingHole(node->target()->control_node()));
+ } else if (auto node = control->TryCast<ConditionalControlNode>()) {
+ ControlNode* first =
+ NearestPostDominatingHole(node->if_true()->control_node());
+ ControlNode* second =
+ NearestPostDominatingHole(node->if_false()->control_node());
+
+ // Either find the merge-point of both branches, or the highest reachable
+ // control-node of the longest branch after the last node of the shortest
+ // branch.
+
+ // As long as there's no merge-point.
+ while (first != second) {
+ // Walk the highest branch to find where it goes.
+ if (first->id() > second->id()) std::swap(first, second);
+
+        // If the first branch returns or jumps back, we've found the highest
+        // reachable control-node of the longest branch (the second control
+        // node).
+ if (first->Is<Return>() || first->Is<JumpLoop>()) {
+ control->set_next_post_dominating_hole(second);
+ break;
+ }
+
+        // Continue one step along the highest branch. This may cross over the
+        // lowest branch in case it returns or loops. If labelled blocks are
+        // involved, which branch counts as the highest can swap several times
+        // until a return/jumploop/merge is discovered.
+ first = first->next_post_dominating_hole();
+ }
+
+      // Once the branches have merged, we've found the gap-chain that's
+      // relevant for the control node.
+ control->set_next_post_dominating_hole(first);
+ }
+ }
+}
+
+void StraightForwardRegisterAllocator::PrintLiveRegs() const {
+ bool first = true;
+ for (Register reg : used_registers()) {
+ ValueNode* node = GetRegisterValue(reg);
+ if (first) {
+ first = false;
+ } else {
+ printing_visitor_->os() << ", ";
+ }
+ printing_visitor_->os() << reg << "=v" << node->id();
+ }
+}
+
+void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_.reset(new MaglevPrintingVisitor(std::cout));
+ printing_visitor_->PreProcessGraph(compilation_unit_, graph);
+ }
+
+ for (block_it_ = graph->begin(); block_it_ != graph->end(); ++block_it_) {
+ BasicBlock* block = *block_it_;
+
+ // Restore mergepoint state.
+ if (block->has_state()) {
+ InitializeRegisterValues(block->state()->register_state());
+ }
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->PreProcessBasicBlock(compilation_unit_, block);
+ printing_visitor_->os() << "live regs: ";
+ PrintLiveRegs();
+
+ ControlNode* control = NearestPostDominatingHole(block->control_node());
+ if (!control->Is<JumpLoop>()) {
+ printing_visitor_->os() << "\n[holes:";
+ while (true) {
+ if (control->Is<Jump>()) {
+ BasicBlock* target = control->Cast<Jump>()->target();
+ printing_visitor_->os()
+ << " " << control->id() << "-" << target->first_id();
+ control = control->next_post_dominating_hole();
+ DCHECK_NOT_NULL(control);
+ continue;
+ } else if (control->Is<Return>()) {
+ printing_visitor_->os() << " " << control->id() << ".";
+ break;
+ } else if (control->Is<JumpLoop>()) {
+ printing_visitor_->os() << " " << control->id() << "↰";
+ break;
+ }
+ UNREACHABLE();
+ }
+ printing_visitor_->os() << "]";
+ }
+ printing_visitor_->os() << std::endl;
+ }
+
+ // Activate phis.
+ if (block->has_phi()) {
+ // Firstly, make the phi live, and try to assign it to an input
+ // location.
+ for (Phi* phi : *block->phis()) {
+ phi->SetNoSpillOrHint();
+ TryAllocateToInput(phi);
+ }
+      // Secondly, try to assign the phi to a free register.
+ for (Phi* phi : *block->phis()) {
+ if (phi->result().operand().IsAllocated()) continue;
+ compiler::InstructionOperand allocation = TryAllocateRegister(phi);
+ if (allocation.IsAllocated()) {
+ phi->result().SetAllocated(
+ compiler::AllocatedOperand::cast(allocation));
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ phi, ProcessingState(compilation_unit_, block_it_, nullptr,
+ nullptr, nullptr));
+ printing_visitor_->os()
+ << "phi (new reg) " << phi->result().operand() << std::endl;
+ }
+ }
+ }
+      // Finally, just use a stack slot.
+ for (Phi* phi : *block->phis()) {
+ if (phi->result().operand().IsAllocated()) continue;
+ AllocateSpillSlot(phi);
+ // TODO(verwaest): Will this be used at all?
+ phi->result().SetAllocated(phi->spill_slot());
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ phi, ProcessingState(compilation_unit_, block_it_, nullptr,
+ nullptr, nullptr));
+ printing_visitor_->os()
+ << "phi (stack) " << phi->result().operand() << std::endl;
+ }
+ }
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->os() << "live regs: ";
+ PrintLiveRegs();
+ printing_visitor_->os() << std::endl;
+ }
+ }
+
+ node_it_ = block->nodes().begin();
+ for (; node_it_ != block->nodes().end(); ++node_it_) {
+ AllocateNode(*node_it_);
+ }
+ AllocateControlNode(block->control_node(), block);
+ }
+}
+
+void StraightForwardRegisterAllocator::UpdateInputUse(uint32_t use,
+ const Input& input) {
+ ValueNode* node = input.node();
+
+ // The value was already cleared through a previous input.
+ if (node->is_dead()) return;
+
+ // Update the next use.
+ node->set_next_use(input.next_use_id());
+
+ // If a value is dead, make sure it's cleared.
+ if (node->is_dead()) {
+ FreeRegisters(node);
+
+ // If the stack slot is a local slot, free it so it can be reused.
+ if (node->is_spilled()) {
+ compiler::AllocatedOperand slot = node->spill_slot();
+ if (slot.index() > 0) free_slots_.push_back(slot.index());
+ }
+ return;
+ }
+}
+
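+// Allocates a single (non-control) node: assigns its inputs and temporaries,
+// releases inputs for which this was the last use, spills registers across
+// calls (and, for now, deopts), and finally allocates the node's result.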
+void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
+ for (Input& input : *node) AssignInput(input);
+ AssignTemporaries(node);
+ for (Input& input : *node) UpdateInputUse(node->id(), input);
+
+ if (node->properties().is_call()) SpillAndClearRegisters();
+ // TODO(verwaest): This isn't a good idea :)
+ if (node->properties().can_deopt()) SpillRegisters();
+
+ // Allocate node output.
+ if (node->Is<ValueNode>()) AllocateNodeResult(node->Cast<ValueNode>());
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
+ nullptr));
+ printing_visitor_->os() << "live regs: ";
+ PrintLiveRegs();
+ printing_visitor_->os() << "\n";
+ }
+}
+
+void StraightForwardRegisterAllocator::AllocateNodeResult(ValueNode* node) {
+ DCHECK(!node->Is<Phi>());
+
+ node->SetNoSpillOrHint();
+
+ compiler::UnallocatedOperand operand =
+ compiler::UnallocatedOperand::cast(node->result().operand());
+
+ if (operand.basic_policy() == compiler::UnallocatedOperand::FIXED_SLOT) {
+ DCHECK(node->Is<InitialValue>());
+ DCHECK_LT(operand.fixed_slot_index(), 0);
+ // Set the stack slot to exactly where the value is.
+ compiler::AllocatedOperand location(compiler::AllocatedOperand::STACK_SLOT,
+ MachineRepresentation::kTagged,
+ operand.fixed_slot_index());
+ node->result().SetAllocated(location);
+ node->Spill(location);
+ return;
+ }
+
+ switch (operand.extended_policy()) {
+ case compiler::UnallocatedOperand::FIXED_REGISTER: {
+ Register r = Register::from_code(operand.fixed_register_index());
+ node->result().SetAllocated(ForceAllocate(r, node));
+ break;
+ }
+
+ case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
+ node->result().SetAllocated(AllocateRegister(node));
+ break;
+
+ case compiler::UnallocatedOperand::SAME_AS_INPUT: {
+ Input& input = node->input(operand.input_index());
+ Register r = input.AssignedRegister();
+ node->result().SetAllocated(ForceAllocate(r, node));
+ break;
+ }
+
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
+ case compiler::UnallocatedOperand::NONE:
+ case compiler::UnallocatedOperand::FIXED_FP_REGISTER:
+ case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
+ UNREACHABLE();
+ }
+
+ // Immediately kill the register use if the node doesn't have a valid
+ // live-range.
+ // TODO(verwaest): Remove once we can avoid allocating such registers.
+ if (!node->has_valid_live_range() &&
+ node->result().operand().IsAnyRegister()) {
+ DCHECK(node->has_register());
+ FreeRegisters(node);
+ DCHECK(!node->has_register());
+ DCHECK(node->is_dead());
+ }
+}
+
+void StraightForwardRegisterAllocator::DropRegisterValue(Register reg) {
+ // The register should not already be free.
+ DCHECK(!free_registers_.has(reg));
+
+ ValueNode* node = GetRegisterValue(reg);
+
+ // Remove the register from the node's list.
+ node->RemoveRegister(reg);
+
+ // Return if the removed value already has another register or is spilled.
+ if (node->has_register() || node->is_spilled()) return;
+
+ // Try to move the value to another register.
+ if (free_registers_ != kEmptyRegList) {
+ Register target_reg = free_registers_.PopFirst();
+ SetRegister(target_reg, node);
+ // Emit a gapmove.
+ compiler::AllocatedOperand source(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged,
+ reg.code());
+ compiler::AllocatedOperand target(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged,
+ target_reg.code());
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "gap move: " << PrintNodeLabel(graph_labeller(), node) << ": "
+ << target << " ← " << source << std::endl;
+ }
+ AddMoveBeforeCurrentNode(source, target);
+ return;
+ }
+
+ // If all else fails, spill the value.
+ Spill(node);
+}
+
+void StraightForwardRegisterAllocator::InitializeConditionalBranchRegisters(
+ ConditionalControlNode* control_node, BasicBlock* target) {
+ if (target->is_empty_block()) {
+ // Jumping over an empty block, so we're in fact merging.
+ Jump* jump = target->control_node()->Cast<Jump>();
+ target = jump->target();
+ return MergeRegisterValues(control_node, target, jump->predecessor_id());
+ }
+ if (target->has_state()) {
+ // Not a fall-through branch, copy the state over.
+ return InitializeBranchTargetRegisterValues(control_node, target);
+ }
+ // Clear dead fall-through registers.
+ DCHECK_EQ(control_node->id() + 1, target->first_id());
+ RegList registers = used_registers();
+ while (registers != kEmptyRegList) {
+ Register reg = registers.PopFirst();
+ ValueNode* node = GetRegisterValue(reg);
+ if (!IsLiveAtTarget(node, control_node, target)) {
+ FreeRegisters(node);
+ // Update the registers we're visiting to avoid revisiting this node.
+ registers.clear(free_registers_);
+ }
+ }
+}
+
+void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
+ BasicBlock* block) {
+ for (Input& input : *node) AssignInput(input);
+ AssignTemporaries(node);
+ for (Input& input : *node) UpdateInputUse(node->id(), input);
+
+ if (node->properties().is_call()) SpillAndClearRegisters();
+
+ // Inject allocation into target phis.
+ if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
+ BasicBlock* target = unconditional->target();
+ if (target->has_phi()) {
+ Phi::List* phis = target->phis();
+ for (Phi* phi : *phis) {
+ Input& input = phi->input(block->predecessor_id());
+ input.InjectAllocated(input.node()->allocation());
+ }
+ for (Phi* phi : *phis) {
+ UpdateInputUse(phi->id(), phi->input(block->predecessor_id()));
+ }
+ }
+ }
+
+ // TODO(verwaest): This isn't a good idea :)
+ if (node->properties().can_deopt()) SpillRegisters();
+
+  // Merge register values. Values that only flow into phis and are not
+  // independently live will be killed as part of the merge.
+ if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
+    // Empty blocks are merged immediately when handling their predecessor's
+    // control node.
+ if (!block->is_empty_block()) {
+ MergeRegisterValues(unconditional, unconditional->target(),
+ block->predecessor_id());
+ }
+ } else if (auto conditional = node->TryCast<ConditionalControlNode>()) {
+ InitializeConditionalBranchRegisters(conditional, conditional->if_true());
+ InitializeConditionalBranchRegisters(conditional, conditional->if_false());
+ }
+
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
+ nullptr));
+ }
+}
+
+void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
+ // Try allocate phis to a register used by any of the inputs.
+ for (Input& input : *phi) {
+ if (input.operand().IsRegister()) {
+ Register reg = input.AssignedRegister();
+ if (free_registers_.has(reg)) {
+ phi->result().SetAllocated(ForceAllocate(reg, phi));
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->Process(
+ phi, ProcessingState(compilation_unit_, block_it_, nullptr,
+ nullptr, nullptr));
+ printing_visitor_->os()
+ << "phi (reuse) " << input.operand() << std::endl;
+ }
+ return;
+ }
+ }
+ }
+}
+
+void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
+ compiler::AllocatedOperand source, compiler::AllocatedOperand target) {
+ GapMove* gap_move =
+ Node::New<GapMove>(compilation_unit_->zone(), {}, source, target);
+ if (compilation_unit_->has_graph_labeller()) {
+ graph_labeller()->RegisterNode(gap_move);
+ }
+ if (*node_it_ == nullptr) {
+ // We're at the control node, so append instead.
+ (*block_it_)->nodes().Add(gap_move);
+ node_it_ = (*block_it_)->nodes().end();
+ } else {
+ DCHECK_NE(node_it_, (*block_it_)->nodes().end());
+ node_it_.InsertBefore(gap_move);
+ }
+}
+
+void StraightForwardRegisterAllocator::Spill(ValueNode* node) {
+ if (node->is_spilled()) return;
+ AllocateSpillSlot(node);
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "spill: " << node->spill_slot() << " ← "
+ << PrintNodeLabel(graph_labeller(), node) << std::endl;
+ }
+}
+
+void StraightForwardRegisterAllocator::AssignInput(Input& input) {
+ compiler::UnallocatedOperand operand =
+ compiler::UnallocatedOperand::cast(input.operand());
+ ValueNode* node = input.node();
+ compiler::AllocatedOperand location = node->allocation();
+
+ switch (operand.extended_policy()) {
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
+ case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
+ input.SetAllocated(location);
+ break;
+
+ case compiler::UnallocatedOperand::FIXED_REGISTER: {
+ Register reg = Register::from_code(operand.fixed_register_index());
+ input.SetAllocated(ForceAllocate(reg, node));
+ break;
+ }
+
+ case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
+ if (location.IsAnyRegister()) {
+ input.SetAllocated(location);
+ } else {
+ input.SetAllocated(AllocateRegister(node));
+ }
+ break;
+
+ case compiler::UnallocatedOperand::FIXED_FP_REGISTER:
+ case compiler::UnallocatedOperand::SAME_AS_INPUT:
+ case compiler::UnallocatedOperand::NONE:
+ case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
+ UNREACHABLE();
+ }
+
+ compiler::AllocatedOperand allocated =
+ compiler::AllocatedOperand::cast(input.operand());
+ if (location != allocated) {
+ if (FLAG_trace_maglev_regalloc) {
+ printing_visitor_->os()
+ << "gap move: " << allocated << " ← " << location << std::endl;
+ }
+ AddMoveBeforeCurrentNode(location, allocated);
+ }
+}
+
+void StraightForwardRegisterAllocator::SpillRegisters() {
+ for (Register reg : used_registers()) {
+ ValueNode* node = GetRegisterValue(reg);
+ Spill(node);
+ }
+}
+
+void StraightForwardRegisterAllocator::SpillAndClearRegisters() {
+ while (used_registers() != kEmptyRegList) {
+ Register reg = used_registers().first();
+ ValueNode* node = GetRegisterValue(reg);
+ Spill(node);
+ FreeRegisters(node);
+ DCHECK(!used_registers().has(reg));
+ }
+}
+
+void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
+ DCHECK(!node->is_spilled());
+ uint32_t free_slot;
+ if (free_slots_.empty()) {
+ free_slot = top_of_stack_++;
+ } else {
+ free_slot = free_slots_.back();
+ free_slots_.pop_back();
+ }
+ node->Spill(compiler::AllocatedOperand(compiler::AllocatedOperand::STACK_SLOT,
+ MachineRepresentation::kTagged,
+ free_slot));
+}
+
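+// Picks a register to free up: prefer one whose value is also held in another
+// register; otherwise evict the value whose next use is furthest away.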
+void StraightForwardRegisterAllocator::FreeSomeRegister() {
+ int furthest_use = 0;
+ Register best = Register::no_reg();
+ for (Register reg : used_registers()) {
+ ValueNode* value = GetRegisterValue(reg);
+    // The cheapest register to clear is one whose value is also held in
+    // another register.
+ if (value->num_registers() > 1) {
+ best = reg;
+ break;
+ }
+ int use = value->next_use();
+ if (use > furthest_use) {
+ furthest_use = use;
+ best = reg;
+ }
+ }
+ DCHECK(best.is_valid());
+ FreeRegister(best);
+}
+
+compiler::AllocatedOperand StraightForwardRegisterAllocator::AllocateRegister(
+ ValueNode* node) {
+ if (free_registers_ == kEmptyRegList) FreeSomeRegister();
+ compiler::InstructionOperand allocation = TryAllocateRegister(node);
+ DCHECK(allocation.IsAllocated());
+ return compiler::AllocatedOperand::cast(allocation);
+}
+
+compiler::AllocatedOperand StraightForwardRegisterAllocator::ForceAllocate(
+ Register reg, ValueNode* node) {
+ if (free_registers_.has(reg)) {
+ // If it's already free, remove it from the free list.
+ free_registers_.clear(reg);
+ } else if (GetRegisterValue(reg) == node) {
+ return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged,
+ reg.code());
+ } else {
+ DropRegisterValue(reg);
+ }
+#ifdef DEBUG
+ DCHECK(!free_registers_.has(reg));
+#endif
+ SetRegister(reg, node);
+ return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged, reg.code());
+}
+
+void StraightForwardRegisterAllocator::SetRegister(Register reg,
+ ValueNode* node) {
+ DCHECK(!free_registers_.has(reg));
+ register_values_[reg.code()] = node;
+ node->AddRegister(reg);
+}
+
+compiler::InstructionOperand
+StraightForwardRegisterAllocator::TryAllocateRegister(ValueNode* node) {
+ if (free_registers_ == kEmptyRegList) return compiler::InstructionOperand();
+ Register reg = free_registers_.PopFirst();
+
+ // Allocation succeeded. This might have found an existing allocation.
+ // Simply update the state anyway.
+ SetRegister(reg, node);
+ return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
+ MachineRepresentation::kTagged, reg.code());
+}
+
+void StraightForwardRegisterAllocator::AssignTemporaries(NodeBase* node) {
+ int num_temporaries_needed = node->num_temporaries_needed();
+ int num_free_registers = free_registers_.Count();
+
+ // Free extra registers if necessary.
+ for (int i = num_free_registers; i < num_temporaries_needed; ++i) {
+ FreeSomeRegister();
+ }
+
+ DCHECK_GE(free_registers_.Count(), num_temporaries_needed);
+ node->assign_temporaries(free_registers_);
+}
+
+void StraightForwardRegisterAllocator::InitializeRegisterValues(
+ MergePointRegisterState& target_state) {
+ // First clear the register state.
+ while (used_registers() != kEmptyRegList) {
+ Register reg = used_registers().first();
+ ValueNode* node = GetRegisterValue(reg);
+ FreeRegisters(node);
+ DCHECK(!used_registers().has(reg));
+ }
+
+ // All registers should be free by now.
+ DCHECK_EQ(free_registers_, kAllocatableGeneralRegisters);
+
+ // Then fill it in with target information.
+ for (auto entry : target_state) {
+ Register reg = entry.reg;
+
+ ValueNode* node;
+ RegisterMerge* merge;
+ LoadMergeState(entry.state, &node, &merge);
+ if (node != nullptr) {
+ free_registers_.clear(reg);
+ SetRegister(reg, node);
+ } else {
+ DCHECK(!entry.state.GetPayload().is_merge);
+ }
+ }
+}
+
+void StraightForwardRegisterAllocator::EnsureInRegister(
+ MergePointRegisterState& target_state, ValueNode* incoming) {
+#ifdef DEBUG
+ bool found = false;
+ for (auto entry : target_state) {
+ ValueNode* node;
+ RegisterMerge* merge;
+ LoadMergeState(entry.state, &node, &merge);
+ if (node == incoming) found = true;
+ }
+ DCHECK(found);
+#endif
+}
+
+void StraightForwardRegisterAllocator::InitializeBranchTargetRegisterValues(
+ ControlNode* source, BasicBlock* target) {
+ MergePointRegisterState& target_state = target->state()->register_state();
+ DCHECK(!target_state.is_initialized());
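+  // Record, for each allocatable register, which value (if any) is live at
+  // the target and currently held in that register; each entry is tagged as a
+  // plain initialized node rather than a merge.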
+ for (auto entry : target_state) {
+ Register reg = entry.reg;
+ ValueNode* node = nullptr;
+ if (!free_registers_.has(reg)) {
+ node = GetRegisterValue(reg);
+ if (!IsLiveAtTarget(node, source, target)) node = nullptr;
+ }
+ entry.state = {node, initialized_node};
+ }
+}
+
+void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
+ BasicBlock* target,
+ int predecessor_id) {
+ MergePointRegisterState& target_state = target->state()->register_state();
+ if (!target_state.is_initialized()) {
+ // This is the first block we're merging, initialize the values.
+ return InitializeBranchTargetRegisterValues(control, target);
+ }
+
+ int predecessor_count = target->state()->predecessor_count();
+ for (auto entry : target_state) {
+ Register reg = entry.reg;
+
+ ValueNode* node;
+ RegisterMerge* merge;
+ LoadMergeState(entry.state, &node, &merge);
+
+ compiler::AllocatedOperand register_info = {
+ compiler::LocationOperand::REGISTER, MachineRepresentation::kTagged,
+ reg.code()};
+
+ ValueNode* incoming = nullptr;
+ if (!free_registers_.has(reg)) {
+ incoming = GetRegisterValue(reg);
+ if (!IsLiveAtTarget(incoming, control, target)) {
+ incoming = nullptr;
+ }
+ }
+
+ if (incoming == node) {
+ // We're using the same register as the target already has. If registers
+ // are merged, add input information.
+ if (merge) merge->operand(predecessor_id) = register_info;
+ continue;
+ }
+
+ if (merge) {
+ // The register is already occupied with a different node. Figure out
+ // where that node is allocated on the incoming branch.
+ merge->operand(predecessor_id) = node->allocation();
+
+ // If there's a value in the incoming state, that value is either
+ // already spilled or in another place in the merge state.
+ if (incoming != nullptr && incoming->is_spilled()) {
+ EnsureInRegister(target_state, incoming);
+ }
+ continue;
+ }
+
+ DCHECK_IMPLIES(node == nullptr, incoming != nullptr);
+ if (node == nullptr && !incoming->is_spilled()) {
+ // If the register is unallocated at the merge point, and the incoming
+ // value isn't spilled, that means we must have seen it already in a
+ // different register.
+ EnsureInRegister(target_state, incoming);
+ continue;
+ }
+
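+    // Allocate a RegisterMerge followed by one AllocatedOperand per
+    // predecessor as a single zone allocation, constructed with placement new.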
+ const size_t size = sizeof(RegisterMerge) +
+ predecessor_count * sizeof(compiler::AllocatedOperand);
+ void* buffer = compilation_unit_->zone()->Allocate<void*>(size);
+ merge = new (buffer) RegisterMerge();
+ merge->node = node == nullptr ? incoming : node;
+
+    // If the register is unallocated at the merge point, the allocation so
+    // far is the spill slot for the incoming value. Otherwise all incoming
+    // branches agree that the current node is already in this register.
+ compiler::AllocatedOperand info_so_far =
+ node == nullptr
+ ? compiler::AllocatedOperand::cast(incoming->spill_slot())
+ : register_info;
+
+ // Initialize the entire array with info_so_far since we don't know in
+ // which order we've seen the predecessors so far. Predecessors we
+ // haven't seen yet will simply overwrite their entry later.
+ for (int i = 0; i < predecessor_count; i++) {
+ merge->operand(i) = info_so_far;
+ }
+ // If the register is unallocated at the merge point, fill in the
+ // incoming value. Otherwise find the merge-point node in the incoming
+ // state.
+ if (node == nullptr) {
+ merge->operand(predecessor_id) = register_info;
+ } else {
+ merge->operand(predecessor_id) = node->allocation();
+ }
+ entry.state = {merge, initialized_merge};
+ }
+}
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-regalloc.h b/deps/v8/src/maglev/maglev-regalloc.h
new file mode 100644
index 0000000000..c198d2f8fc
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-regalloc.h
@@ -0,0 +1,112 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_REGALLOC_H_
+#define V8_MAGLEV_MAGLEV_REGALLOC_H_
+
+#include "src/codegen/reglist.h"
+#include "src/compiler/backend/instruction.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+#include "src/maglev/maglev-regalloc-data.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class MaglevCompilationUnit;
+class MaglevPrintingVisitor;
+class MergePointRegisterState;
+
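+// A single forward-pass ("straight-forward") register allocator over the
+// Maglev graph; the implementation lives in maglev-regalloc.cc.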
+class StraightForwardRegisterAllocator {
+ public:
+ StraightForwardRegisterAllocator(MaglevCompilationUnit* compilation_unit,
+ Graph* graph);
+ ~StraightForwardRegisterAllocator();
+
+ int stack_slots() const { return top_of_stack_; }
+
+ private:
+ std::vector<int> future_register_uses_[Register::kNumRegisters];
+ ValueNode* register_values_[Register::kNumRegisters];
+
+ int top_of_stack_ = 0;
+ RegList free_registers_ = kAllocatableGeneralRegisters;
+ std::vector<uint32_t> free_slots_;
+
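+  // The used set is the complement of free_registers_ within the allocatable
+  // registers; since free_registers_ is always a subset of them, XOR computes
+  // the set difference.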
+ RegList used_registers() const {
+ // Only allocatable registers should be free.
+ DCHECK_EQ(free_registers_, free_registers_ & kAllocatableGeneralRegisters);
+ return kAllocatableGeneralRegisters ^ free_registers_;
+ }
+
+ void ComputePostDominatingHoles(Graph* graph);
+ void AllocateRegisters(Graph* graph);
+
+ void PrintLiveRegs() const;
+
+ void UpdateInputUse(uint32_t use, const Input& input);
+
+ void AllocateControlNode(ControlNode* node, BasicBlock* block);
+ void AllocateNode(Node* node);
+ void AllocateNodeResult(ValueNode* node);
+ void AssignInput(Input& input);
+ void AssignTemporaries(NodeBase* node);
+ void TryAllocateToInput(Phi* phi);
+
+ void FreeRegisters(ValueNode* node) {
+ RegList list = node->ClearRegisters();
+ DCHECK_EQ(free_registers_ & list, kEmptyRegList);
+ free_registers_ |= list;
+ }
+ void FreeRegister(Register reg) { free_registers_.set(reg); }
+
+ ValueNode* GetRegisterValue(Register reg) const {
+ DCHECK(!free_registers_.has(reg));
+ ValueNode* node = register_values_[reg.code()];
+ DCHECK_NOT_NULL(node);
+ return node;
+ }
+
+ void FreeSomeRegister();
+ void AddMoveBeforeCurrentNode(compiler::AllocatedOperand source,
+ compiler::AllocatedOperand target);
+
+ void AllocateSpillSlot(ValueNode* node);
+ void Spill(ValueNode* node);
+ void SpillAndClearRegisters();
+ void SpillRegisters();
+
+ compiler::AllocatedOperand AllocateRegister(ValueNode* node);
+ compiler::AllocatedOperand ForceAllocate(Register reg, ValueNode* node);
+ void SetRegister(Register reg, ValueNode* node);
+ void DropRegisterValue(Register reg);
+ compiler::InstructionOperand TryAllocateRegister(ValueNode* node);
+
+ void InitializeRegisterValues(MergePointRegisterState& target_state);
+ void EnsureInRegister(MergePointRegisterState& target_state,
+ ValueNode* incoming);
+
+ void InitializeBranchTargetRegisterValues(ControlNode* source,
+ BasicBlock* target);
+ void InitializeConditionalBranchRegisters(ConditionalControlNode* source,
+ BasicBlock* target);
+ void MergeRegisterValues(ControlNode* control, BasicBlock* target,
+ int predecessor_id);
+
+ MaglevGraphLabeller* graph_labeller() const {
+ return compilation_unit_->graph_labeller();
+ }
+
+ MaglevCompilationUnit* compilation_unit_;
+ std::unique_ptr<MaglevPrintingVisitor> printing_visitor_;
+ BlockConstIterator block_it_;
+ NodeIterator node_it_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_REGALLOC_H_
diff --git a/deps/v8/src/maglev/maglev-register-frame-array.h b/deps/v8/src/maglev/maglev-register-frame-array.h
new file mode 100644
index 0000000000..c3032533f6
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-register-frame-array.h
@@ -0,0 +1,113 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
+#define V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
+
+#include "src/interpreter/bytecode-register.h"
+#include "src/maglev/maglev-compilation-unit.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+// Vector of values associated with a bytecode's register frame. Indexable by
+// interpreter register.
+template <typename T>
+class RegisterFrameArray {
+ public:
+ explicit RegisterFrameArray(const MaglevCompilationUnit& info) {
+ // The first local is at index zero, parameters are behind it with
+ // negative indices, and the unoptimized frame header is between the two,
+ // so the entire frame state including parameters is the distance from the
+ // last parameter to the last local frame register, plus one to include both
+ // ends.
+ interpreter::Register last_local =
+ interpreter::Register(info.register_count() - 1);
+ interpreter::Register last_param =
+ interpreter::Register::FromParameterIndex(info.parameter_count() - 1);
+ DCHECK_LT(last_param.index(), 0);
+ T* frame =
+ info.zone()->NewArray<T>(last_local.index() - last_param.index() + 1);
+
+    // Set frame_start_ to a "butterfly" pointer into the middle of the above
+    // Zone-allocated array. Parameters are at negative indices, so we
+    // subtract the (negative) last parameter index from the above frame
+    // pointer, which places frame_start_ at the slot for register index 0.
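+    // For example, with hypothetical index values last_param.index() == -5
+    // and last_local.index() == 3, the array has 3 - (-5) + 1 == 9 slots
+    // covering register indices -5 through 3.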
+ frame_start_ = frame - last_param.index();
+ }
+
+ // Disallow copy (use CopyFrom instead).
+ RegisterFrameArray(const RegisterFrameArray& other) V8_NOEXCEPT = delete;
+ RegisterFrameArray& operator=(const RegisterFrameArray& other)
+ V8_NOEXCEPT = delete;
+
+ // Allow move.
+ RegisterFrameArray(RegisterFrameArray&& other) V8_NOEXCEPT = default;
+ RegisterFrameArray& operator=(RegisterFrameArray&& other)
+ V8_NOEXCEPT = default;
+
+ void CopyFrom(const MaglevCompilationUnit& info,
+ const RegisterFrameArray& other,
+ const compiler::BytecodeLivenessState* liveness) {
+ interpreter::Register last_param =
+ interpreter::Register::FromParameterIndex(info.parameter_count() - 1);
+ int end = 1;
+ if (!liveness) {
+ interpreter::Register last_local =
+ interpreter::Register(info.register_count() - 1);
+ end = last_local.index();
+ }
+ // All parameters are live.
+ for (int index = last_param.index(); index <= end; ++index) {
+ interpreter::Register reg(index);
+ (*this)[reg] = other[reg];
+ }
+ if (liveness) {
+ for (int index : *liveness) {
+ interpreter::Register reg(index);
+ (*this)[reg] = other[reg];
+ }
+ }
+ }
+
+ T& operator[](interpreter::Register reg) { return frame_start_[reg.index()]; }
+
+ const T& operator[](interpreter::Register reg) const {
+ return frame_start_[reg.index()];
+ }
+
+ private:
+ static int DataSize(int register_count, int parameter_count) {
+ // The first local is at index zero, parameters are behind it with
+ // negative indices, and the unoptimized frame header is between the two,
+ // so the entire frame state including parameters is the distance from the
+ // last parameter to the last local frame register, plus one to include both
+ // ends.
+ interpreter::Register last_local =
+ interpreter::Register(register_count - 1);
+ interpreter::Register last_param =
+ interpreter::Register::FromParameterIndex(parameter_count - 1);
+ return last_local.index() - last_param.index() + 1;
+ }
+
+ T* data_begin(int parameter_count) const {
+ return frame_start_ +
+ interpreter::Register::FromParameterIndex(parameter_count - 1)
+ .index();
+ }
+
+ // Butterfly pointer for registers, pointing into the middle of a
+ // Zone-allocated Node array.
+ // |
+ // v
+ // [Parameters] [Unoptimized Frame Header] [Locals]
+ T* frame_start_ = nullptr;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
diff --git a/deps/v8/src/maglev/maglev-vreg-allocator.h b/deps/v8/src/maglev/maglev-vreg-allocator.h
new file mode 100644
index 0000000000..19d5517f70
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-vreg-allocator.h
@@ -0,0 +1,57 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
+#define V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
+
+#include "src/maglev/maglev-basic-block.h"
+#include "src/maglev/maglev-graph.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+class ProcessingState;
+
+class MaglevVregAllocationState {
+ public:
+ int AllocateVirtualRegister() { return next_virtual_register_++; }
+ int num_allocated_registers() const { return next_virtual_register_; }
+
+ private:
+ int next_virtual_register_ = 0;
+};
+
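+// Graph processor that hands out sequential virtual register numbers to every
+// node via MaglevVregAllocationState; phi vregs are assigned in a post-process
+// pass over all blocks.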
+class MaglevVregAllocator {
+ public:
+ static constexpr bool kNeedsCheckpointStates = true;
+
+ void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
+ for (BasicBlock* block : *graph) {
+ if (!block->has_phi()) continue;
+ for (Phi* phi : *block->phis()) {
+ phi->AllocateVregInPostProcess(&state_);
+ }
+ }
+ }
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+
+#define DEF_PROCESS_NODE(NAME) \
+ void Process(NAME* node, const ProcessingState& state) { \
+ node->AllocateVreg(&state_, state); \
+ }
+ NODE_BASE_LIST(DEF_PROCESS_NODE)
+#undef DEF_PROCESS_NODE
+
+ private:
+ MaglevVregAllocationState state_;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
diff --git a/deps/v8/src/maglev/maglev.cc b/deps/v8/src/maglev/maglev.cc
new file mode 100644
index 0000000000..6397d02e60
--- /dev/null
+++ b/deps/v8/src/maglev/maglev.cc
@@ -0,0 +1,24 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/maglev/maglev.h"
+
+#include "src/common/globals.h"
+#include "src/maglev/maglev-compilation-info.h"
+#include "src/maglev/maglev-compiler.h"
+
+namespace v8 {
+namespace internal {
+
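+// Compiles |function| with Maglev: builds a MaglevCompilationInfo, compiles
+// its top-level compilation unit, and generates code from it.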
+MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
+ Handle<JSFunction> function) {
+ DCHECK(FLAG_maglev);
+ auto info = maglev::MaglevCompilationInfo::New(isolate, function);
+ maglev::MaglevCompilationUnit* const unit = info->toplevel_compilation_unit();
+ maglev::MaglevCompiler::Compile(unit);
+ return maglev::MaglevCompiler::GenerateCode(unit);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/maglev/maglev.h b/deps/v8/src/maglev/maglev.h
new file mode 100644
index 0000000000..e55df23b15
--- /dev/null
+++ b/deps/v8/src/maglev/maglev.h
@@ -0,0 +1,28 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_H_
+#define V8_MAGLEV_MAGLEV_H_
+
+#ifdef V8_ENABLE_MAGLEV
+
+#include "src/handles/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class JSFunction;
+
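+// Entry point to the Maglev compiler. A minimal, hypothetical usage sketch
+// (assuming FLAG_maglev is enabled and |function| is a compilable JSFunction):
+//
+//   MaybeHandle<CodeT> maybe_code = Maglev::Compile(isolate, function);
+//   Handle<CodeT> code;
+//   if (maybe_code.ToHandle(&code)) {
+//     // Install |code| on |function|.
+//   }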
+class Maglev : public AllStatic {
+ public:
+ static MaybeHandle<CodeT> Compile(Isolate* isolate,
+ Handle<JSFunction> function);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ENABLE_MAGLEV
+#endif // V8_MAGLEV_MAGLEV_H_