author    Michaël Zasso <targos@protonmail.com>  2022-04-19 09:00:36 +0200
committer Michaël Zasso <targos@protonmail.com>  2022-04-21 11:54:15 +0200
commit    6bbc5596b13828a5274a8aeaea4929bdc22168a4 (patch)
tree      3fa11feb9240c699aff627e049d33c358a7320a4 /deps/v8/src/maglev
parent    0d58c0be3e1c3013959c02d42a2a2f21dd31c5f8 (diff)
deps: update V8 to 10.2.154.2
PR-URL: https://github.com/nodejs/node/pull/42740
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
Diffstat (limited to 'deps/v8/src/maglev')
-rw-r--r--  deps/v8/src/maglev/OWNERS                           |    1
-rw-r--r--  deps/v8/src/maglev/maglev-code-gen-state.h          |   73
-rw-r--r--  deps/v8/src/maglev/maglev-code-generator.cc         |  317
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-info.cc       |    6
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-info.h        |    6
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-unit.cc       |   14
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-unit.h        |   21
-rw-r--r--  deps/v8/src/maglev/maglev-compiler.cc               |   88
-rw-r--r--  deps/v8/src/maglev/maglev-compiler.h                |   12
-rw-r--r--  deps/v8/src/maglev/maglev-concurrent-dispatcher.cc  |   32
-rw-r--r--  deps/v8/src/maglev/maglev-concurrent-dispatcher.h   |    9
-rw-r--r--  deps/v8/src/maglev/maglev-graph-builder.cc          |  585
-rw-r--r--  deps/v8/src/maglev/maglev-graph-builder.h           |  302
-rw-r--r--  deps/v8/src/maglev/maglev-graph-printer.cc          |  162
-rw-r--r--  deps/v8/src/maglev/maglev-graph-printer.h           |    7
-rw-r--r--  deps/v8/src/maglev/maglev-graph-processor.h         |  239
-rw-r--r--  deps/v8/src/maglev/maglev-graph-verifier.h          |  143
-rw-r--r--  deps/v8/src/maglev/maglev-interpreter-frame-state.h |  348
-rw-r--r--  deps/v8/src/maglev/maglev-ir.cc                     |  330
-rw-r--r--  deps/v8/src/maglev/maglev-ir.h                      |  677
-rw-r--r--  deps/v8/src/maglev/maglev-regalloc.cc               |  112
-rw-r--r--  deps/v8/src/maglev/maglev-regalloc.h                |    6
-rw-r--r--  deps/v8/src/maglev/maglev-vreg-allocator.h          |    2
-rw-r--r--  deps/v8/src/maglev/maglev.cc                        |    2
24 files changed, 2349 insertions, 1145 deletions
diff --git a/deps/v8/src/maglev/OWNERS b/deps/v8/src/maglev/OWNERS
index dca7476a04..291b217d6e 100644
--- a/deps/v8/src/maglev/OWNERS
+++ b/deps/v8/src/maglev/OWNERS
@@ -1,3 +1,4 @@
leszeks@chromium.org
jgruber@chromium.org
verwaest@chromium.org
+victorgomes@chromium.org
diff --git a/deps/v8/src/maglev/maglev-code-gen-state.h b/deps/v8/src/maglev/maglev-code-gen-state.h
index ecf8bbccda..14a83c0321 100644
--- a/deps/v8/src/maglev/maglev-code-gen-state.h
+++ b/deps/v8/src/maglev/maglev-code-gen-state.h
@@ -19,16 +19,18 @@ namespace v8 {
namespace internal {
namespace maglev {
-class MaglevCodeGenState {
+class InterpreterFrameState;
+
+class DeferredCodeInfo {
public:
- class DeferredCodeInfo {
- public:
- virtual void Generate(MaglevCodeGenState* code_gen_state,
- Label* return_label) = 0;
- Label deferred_code_label;
- Label return_label;
- };
+ virtual void Generate(MaglevCodeGenState* code_gen_state,
+ Label* return_label) = 0;
+ Label deferred_code_label;
+ Label return_label;
+};
+class MaglevCodeGenState {
+ public:
MaglevCodeGenState(MaglevCompilationUnit* compilation_unit,
SafepointTableBuilder* safepoint_table_builder)
: compilation_unit_(compilation_unit),
@@ -40,14 +42,19 @@ class MaglevCodeGenState {
void PushDeferredCode(DeferredCodeInfo* deferred_code) {
deferred_code_.push_back(deferred_code);
}
- void EmitDeferredCode() {
- for (auto& deferred_code : deferred_code_) {
- masm()->RecordComment("-- Deferred block");
- masm()->bind(&deferred_code->deferred_code_label);
- deferred_code->Generate(this, &deferred_code->return_label);
- masm()->int3();
- }
+ const std::vector<DeferredCodeInfo*>& deferred_code() const {
+ return deferred_code_;
}
+ void PushEagerDeopt(EagerDeoptInfo* info) { eager_deopts_.push_back(info); }
+ void PushLazyDeopt(LazyDeoptInfo* info) { lazy_deopts_.push_back(info); }
+ const std::vector<EagerDeoptInfo*>& eager_deopts() const {
+ return eager_deopts_;
+ }
+ const std::vector<LazyDeoptInfo*>& lazy_deopts() const {
+ return lazy_deopts_;
+ }
+ inline void DefineSafepointStackSlots(
+ SafepointTableBuilder::Safepoint& safepoint) const;
compiler::NativeContextRef native_context() const {
return broker()->target_native_context();
@@ -86,6 +93,8 @@ class MaglevCodeGenState {
MacroAssembler masm_;
std::vector<DeferredCodeInfo*> deferred_code_;
+ std::vector<EagerDeoptInfo*> eager_deopts_;
+ std::vector<LazyDeoptInfo*> lazy_deopts_;
int vreg_slots_ = 0;
// Allow marking some codegen paths as unsupported, so that we can test maglev
@@ -97,9 +106,24 @@ class MaglevCodeGenState {
// Some helpers for codegen.
// TODO(leszeks): consider moving this to a separate header.
+inline constexpr int GetFramePointerOffsetForStackSlot(int index) {
+ return StandardFrameConstants::kExpressionsOffset -
+ index * kSystemPointerSize;
+}
+
+inline int GetFramePointerOffsetForStackSlot(
+ const compiler::AllocatedOperand& operand) {
+ return GetFramePointerOffsetForStackSlot(operand.index());
+}
+
+inline int GetSafepointIndexForStackSlot(int i) {
+ // Safepoint tables also contain slots for all fixed frame slots (both
+ // above and below the fp).
+ return StandardFrameConstants::kFixedSlotCount + i;
+}
+
inline MemOperand GetStackSlot(int index) {
- return MemOperand(rbp, StandardFrameConstants::kExpressionsOffset -
- index * kSystemPointerSize);
+ return MemOperand(rbp, GetFramePointerOffsetForStackSlot(index));
}
inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
@@ -122,10 +146,17 @@ inline MemOperand ToMemOperand(const ValueLocation& location) {
return ToMemOperand(location.operand());
}
-inline int GetSafepointIndexForStackSlot(int i) {
- // Safepoint tables also contain slots for all fixed frame slots (both
- // above and below the fp).
- return StandardFrameConstants::kFixedSlotCount + i;
+inline void MaglevCodeGenState::DefineSafepointStackSlots(
+ SafepointTableBuilder::Safepoint& safepoint) const {
+ DCHECK_EQ(compilation_unit()->stack_value_repr().size(), vreg_slots());
+ int stack_slot = 0;
+ for (ValueRepresentation repr : compilation_unit()->stack_value_repr()) {
+ if (repr == ValueRepresentation::kTagged) {
+ safepoint.DefineTaggedStackSlot(
+ GetSafepointIndexForStackSlot(stack_slot));
+ }
+ stack_slot++;
+ }
}
} // namespace maglev
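
[Editor's note: a minimal standalone sketch of the safepoint arithmetic introduced above. GetSafepointIndexForStackSlot biases maglev stack slots past the fixed frame slots, and DefineSafepointStackSlots records only the tagged slots so the GC never scans untagged values as pointers. kFixedSlotCount = 4 is an illustrative stand-in for StandardFrameConstants::kFixedSlotCount, and the enum is simplified; nothing below is V8 code.]

    #include <cstdio>
    #include <vector>

    enum class ValueRepresentation { kTagged, kInt32, kFloat64 };

    constexpr int kFixedSlotCount = 4;  // assumed value, for illustration only

    int GetSafepointIndexForStackSlot(int i) {
      // Safepoint tables also cover the fixed frame slots, so maglev stack
      // slot i lands at table index kFixedSlotCount + i.
      return kFixedSlotCount + i;
    }

    int main() {
      std::vector<ValueRepresentation> stack_value_repr = {
          ValueRepresentation::kTagged, ValueRepresentation::kInt32,
          ValueRepresentation::kTagged};
      int stack_slot = 0;
      for (ValueRepresentation repr : stack_value_repr) {
        if (repr == ValueRepresentation::kTagged) {
          std::printf("tagged slot %d -> safepoint index %d\n", stack_slot,
                      GetSafepointIndexForStackSlot(stack_slot));
        }
        stack_slot++;
      }
      // Prints indices 4 and 6; the kInt32 slot is skipped entirely.
    }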
diff --git a/deps/v8/src/maglev/maglev-code-generator.cc b/deps/v8/src/maglev/maglev-code-generator.cc
index f578d53777..d57420ae3e 100644
--- a/deps/v8/src/maglev/maglev-code-generator.cc
+++ b/deps/v8/src/maglev/maglev-code-generator.cc
@@ -7,6 +7,9 @@
#include "src/codegen/code-desc.h"
#include "src/codegen/register.h"
#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/translation-array.h"
+#include "src/execution/frame-constants.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
@@ -15,10 +18,10 @@
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
+#include "src/objects/code-inl.h"
namespace v8 {
namespace internal {
-
namespace maglev {
#define __ masm()->
@@ -41,8 +44,6 @@ using StackToRegisterMoves =
class MaglevCodeGeneratingNodeProcessor {
public:
- static constexpr bool kNeedsCheckpointStates = true;
-
explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
: code_gen_state_(code_gen_state) {}
@@ -51,6 +52,8 @@ class MaglevCodeGeneratingNodeProcessor {
__ int3();
}
+ __ BailoutIfDeoptimized(rbx);
+
__ EnterFrame(StackFrame::BASELINE);
// Save arguments in frame.
@@ -75,18 +78,14 @@ class MaglevCodeGeneratingNodeProcessor {
}
// We don't emit proper safepoint data yet; instead, define a single
- // safepoint at the end of the code object, with all-tagged stack slots.
- // TODO(jgruber): Real safepoint handling.
+ // safepoint at the end of the code object.
+ // TODO(v8:7700): Add better safepoint handling when we support stack reuse.
SafepointTableBuilder::Safepoint safepoint =
safepoint_table_builder()->DefineSafepoint(masm());
- for (int i = 0; i < code_gen_state_->vreg_slots(); i++) {
- safepoint.DefineTaggedStackSlot(GetSafepointIndexForStackSlot(i));
- }
+ code_gen_state_->DefineSafepointStackSlots(safepoint);
}
- void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
- code_gen_state_->EmitDeferredCode();
- }
+ void PostProcessGraph(MaglevCompilationUnit*, Graph*) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {
if (FLAG_code_comments) {
@@ -305,6 +304,15 @@ class MaglevCodeGeneratingNodeProcessor {
MaglevCodeGenState* code_gen_state_;
};
+constexpr int DeoptStackSlotIndexFromFPOffset(int offset) {
+ return 1 - offset / kSystemPointerSize;
+}
+
+int DeoptStackSlotFromStackSlot(const compiler::AllocatedOperand& operand) {
+ return DeoptStackSlotIndexFromFPOffset(
+ GetFramePointerOffsetForStackSlot(operand));
+}
+
} // namespace
class MaglevCodeGeneratorImpl final {
@@ -315,8 +323,12 @@ class MaglevCodeGeneratorImpl final {
}
private:
+ static constexpr int kFunctionLiteralIndex = 0;
+ static constexpr int kOptimizedOutConstantIndex = 1;
+
MaglevCodeGeneratorImpl(MaglevCompilationUnit* compilation_unit, Graph* graph)
: safepoint_table_builder_(compilation_unit->zone()),
+ translation_array_builder_(compilation_unit->zone()),
code_gen_state_(compilation_unit, safepoint_table_builder()),
processor_(compilation_unit, &code_gen_state_),
graph_(graph) {}
@@ -328,7 +340,207 @@ class MaglevCodeGeneratorImpl final {
return BuildCodeObject();
}
- void EmitCode() { processor_.ProcessGraph(graph_); }
+ void EmitCode() {
+ processor_.ProcessGraph(graph_);
+ EmitDeferredCode();
+ EmitDeopts();
+ }
+
+ void EmitDeferredCode() {
+ for (DeferredCodeInfo* deferred_code : code_gen_state_.deferred_code()) {
+ __ RecordComment("-- Deferred block");
+ __ bind(&deferred_code->deferred_code_label);
+ deferred_code->Generate(&code_gen_state_, &deferred_code->return_label);
+ __ Trap();
+ }
+ }
+
+ void EmitDeopts() {
+ deopt_exit_start_offset_ = __ pc_offset();
+
+ __ RecordComment("-- Non-lazy deopts");
+ for (EagerDeoptInfo* deopt_info : code_gen_state_.eager_deopts()) {
+ EmitEagerDeopt(deopt_info);
+
+ __ bind(&deopt_info->deopt_entry_label);
+ __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Eager, 0,
+ &deopt_info->deopt_entry_label,
+ DeoptimizeKind::kEager, nullptr, nullptr);
+ }
+
+ __ RecordComment("-- Lazy deopts");
+ int last_updated_safepoint = 0;
+ for (LazyDeoptInfo* deopt_info : code_gen_state_.lazy_deopts()) {
+ EmitLazyDeopt(deopt_info);
+
+ __ bind(&deopt_info->deopt_entry_label);
+ __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, 0,
+ &deopt_info->deopt_entry_label,
+ DeoptimizeKind::kLazy, nullptr, nullptr);
+
+ last_updated_safepoint =
+ safepoint_table_builder_.UpdateDeoptimizationInfo(
+ deopt_info->deopting_call_return_pc,
+ deopt_info->deopt_entry_label.pos(), last_updated_safepoint,
+ deopt_info->deopt_index);
+ }
+ }
+
+ void EmitEagerDeopt(EagerDeoptInfo* deopt_info) {
+ int frame_count = 1;
+ int jsframe_count = 1;
+ int update_feedback_count = 0;
+ deopt_info->deopt_index = translation_array_builder_.BeginTranslation(
+ frame_count, jsframe_count, update_feedback_count);
+
+ // Returns are used for updating an accumulator or register after a lazy
+ // deopt.
+ const int return_offset = 0;
+ const int return_count = 0;
+ translation_array_builder_.BeginInterpretedFrame(
+ deopt_info->state.bytecode_position, kFunctionLiteralIndex,
+ code_gen_state_.register_count(), return_offset, return_count);
+
+ EmitDeoptFrameValues(
+ *code_gen_state_.compilation_unit(), deopt_info->state.register_frame,
+ deopt_info->input_locations, interpreter::Register::invalid_value());
+ }
+
+ void EmitLazyDeopt(LazyDeoptInfo* deopt_info) {
+ int frame_count = 1;
+ int jsframe_count = 1;
+ int update_feedback_count = 0;
+ deopt_info->deopt_index = translation_array_builder_.BeginTranslation(
+ frame_count, jsframe_count, update_feedback_count);
+
+ // Return offsets are counted from the end of the translation frame, which
+ // is the array [parameters..., locals..., accumulator].
+ int return_offset;
+ if (deopt_info->result_location ==
+ interpreter::Register::virtual_accumulator()) {
+ return_offset = 0;
+ } else if (deopt_info->result_location.is_parameter()) {
+ // This is slightly tricky to reason about because of zero indexing and
+ // fence post errors. As an example, consider a frame with 2 locals and
+ // 2 parameters, where we want argument index 1 -- looking at the array
+ // in reverse order we have:
+ // [acc, r1, r0, a1, a0]
+ // ^
+ // and this calculation gives, correctly:
+ // 2 + 2 - 1 = 3
+ return_offset = code_gen_state_.register_count() +
+ code_gen_state_.parameter_count() -
+ deopt_info->result_location.ToParameterIndex();
+ } else {
+ return_offset = code_gen_state_.register_count() -
+ deopt_info->result_location.index();
+ }
+ // TODO(leszeks): Support lazy deopts with multiple return values.
+ int return_count = 1;
+ translation_array_builder_.BeginInterpretedFrame(
+ deopt_info->state.bytecode_position, kFunctionLiteralIndex,
+ code_gen_state_.register_count(), return_offset, return_count);
+
+ EmitDeoptFrameValues(
+ *code_gen_state_.compilation_unit(), deopt_info->state.register_frame,
+ deopt_info->input_locations, deopt_info->result_location);
+ }
+
+ void EmitDeoptFrameSingleValue(ValueNode* value,
+ const InputLocation& input_location) {
+ const compiler::AllocatedOperand& operand =
+ compiler::AllocatedOperand::cast(input_location.operand());
+ if (operand.IsRegister()) {
+ if (value->properties().is_untagged_value()) {
+ translation_array_builder_.StoreInt32Register(operand.GetRegister());
+ } else {
+ translation_array_builder_.StoreRegister(operand.GetRegister());
+ }
+ } else {
+ if (value->properties().is_untagged_value()) {
+ translation_array_builder_.StoreInt32StackSlot(
+ DeoptStackSlotFromStackSlot(operand));
+ } else {
+ translation_array_builder_.StoreStackSlot(
+ DeoptStackSlotFromStackSlot(operand));
+ }
+ }
+ }
+
+ void EmitDeoptFrameValues(
+ const MaglevCompilationUnit& compilation_unit,
+ const CompactInterpreterFrameState* checkpoint_state,
+ const InputLocation* input_locations,
+ interpreter::Register result_location) {
+ // Closure
+ int closure_index = DeoptStackSlotIndexFromFPOffset(
+ StandardFrameConstants::kFunctionOffset);
+ translation_array_builder_.StoreStackSlot(closure_index);
+
+ // TODO(leszeks): The input locations array happens to be in the same order
+ // as parameters+locals+accumulator are accessed here. We should make this
+ // clearer and guard against this invariant failing.
+ const InputLocation* input_location = input_locations;
+
+ // Parameters
+ {
+ int i = 0;
+ checkpoint_state->ForEachParameter(
+ compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
+ DCHECK_EQ(reg.ToParameterIndex(), i);
+ if (reg != result_location) {
+ EmitDeoptFrameSingleValue(value, *input_location);
+ } else {
+ translation_array_builder_.StoreLiteral(
+ kOptimizedOutConstantIndex);
+ }
+ i++;
+ input_location++;
+ });
+ }
+
+ // Context
+ int context_index =
+ DeoptStackSlotIndexFromFPOffset(StandardFrameConstants::kContextOffset);
+ translation_array_builder_.StoreStackSlot(context_index);
+
+ // Locals
+ {
+ int i = 0;
+ checkpoint_state->ForEachLocal(
+ compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
+ DCHECK_LE(i, reg.index());
+ if (reg == result_location) {
+ input_location++;
+ return;
+ }
+ while (i < reg.index()) {
+ translation_array_builder_.StoreLiteral(
+ kOptimizedOutConstantIndex);
+ i++;
+ }
+ DCHECK_EQ(i, reg.index());
+ EmitDeoptFrameSingleValue(value, *input_location);
+ i++;
+ input_location++;
+ });
+ while (i < code_gen_state_.register_count()) {
+ translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
+ i++;
+ }
+ }
+
+ // Accumulator
+ {
+ if (checkpoint_state->liveness()->AccumulatorIsLive() &&
+ result_location != interpreter::Register::virtual_accumulator()) {
+ ValueNode* value = checkpoint_state->accumulator(compilation_unit);
+ EmitDeoptFrameSingleValue(value, *input_location);
+ } else {
+ translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
+ }
+ }
+ }
void EmitMetadata() {
// Final alignment before starting on the metadata section.
@@ -345,9 +557,84 @@ class MaglevCodeGeneratorImpl final {
kNoHandlerTableOffset);
return Factory::CodeBuilder{isolate(), desc, CodeKind::MAGLEV}
.set_stack_slots(stack_slot_count_with_fixed_frame())
+ .set_deoptimization_data(GenerateDeoptimizationData())
.TryBuild();
}
+ Handle<DeoptimizationData> GenerateDeoptimizationData() {
+ int eager_deopt_count =
+ static_cast<int>(code_gen_state_.eager_deopts().size());
+ int lazy_deopt_count =
+ static_cast<int>(code_gen_state_.lazy_deopts().size());
+ int deopt_count = lazy_deopt_count + eager_deopt_count;
+ if (deopt_count == 0) {
+ return DeoptimizationData::Empty(isolate());
+ }
+ Handle<DeoptimizationData> data =
+ DeoptimizationData::New(isolate(), deopt_count, AllocationType::kOld);
+
+ Handle<TranslationArray> translation_array =
+ translation_array_builder_.ToTranslationArray(isolate()->factory());
+
+ data->SetTranslationByteArray(*translation_array);
+ data->SetInlinedFunctionCount(Smi::zero());
+ // TODO(leszeks): Support optimization IDs
+ data->SetOptimizationId(Smi::zero());
+
+ DCHECK_NE(deopt_exit_start_offset_, -1);
+ data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
+ data->SetEagerDeoptCount(Smi::FromInt(eager_deopt_count));
+ data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count));
+
+ data->SetSharedFunctionInfo(
+ *code_gen_state_.compilation_unit()->shared_function_info().object());
+
+ // TODO(leszeks): Proper literals array.
+ Handle<DeoptimizationLiteralArray> literals =
+ isolate()->factory()->NewDeoptimizationLiteralArray(2);
+ literals->set(
+ kFunctionLiteralIndex,
+ *code_gen_state_.compilation_unit()->shared_function_info().object());
+ literals->set(kOptimizedOutConstantIndex,
+ ReadOnlyRoots(isolate()).optimized_out());
+ data->SetLiteralArray(*literals);
+
+ // TODO(leszeks): Fix once we have inlining.
+ Handle<PodArray<InliningPosition>> inlining_positions =
+ PodArray<InliningPosition>::New(isolate(), 0);
+ data->SetInliningPositions(*inlining_positions);
+
+ // TODO(leszeks): Fix once we have OSR.
+ BytecodeOffset osr_offset = BytecodeOffset::None();
+ data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(-1));
+
+ // Populate deoptimization entries.
+ int i = 0;
+ for (EagerDeoptInfo* deopt_info : code_gen_state_.eager_deopts()) {
+ DCHECK_NE(deopt_info->deopt_index, -1);
+ data->SetBytecodeOffset(i, deopt_info->state.bytecode_position);
+ data->SetTranslationIndex(i, Smi::FromInt(deopt_info->deopt_index));
+ data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label.pos()));
+#ifdef DEBUG
+ data->SetNodeId(i, Smi::FromInt(i));
+#endif // DEBUG
+ i++;
+ }
+ for (LazyDeoptInfo* deopt_info : code_gen_state_.lazy_deopts()) {
+ DCHECK_NE(deopt_info->deopt_index, -1);
+ data->SetBytecodeOffset(i, deopt_info->state.bytecode_position);
+ data->SetTranslationIndex(i, Smi::FromInt(deopt_info->deopt_index));
+ data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label.pos()));
+#ifdef DEBUG
+ data->SetNodeId(i, Smi::FromInt(i));
+#endif // DEBUG
+ i++;
+ }
+
+ return data;
+ }
+
int stack_slot_count() const { return code_gen_state_.vreg_slots(); }
int stack_slot_count_with_fixed_frame() const {
return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
@@ -360,11 +647,17 @@ class MaglevCodeGeneratorImpl final {
SafepointTableBuilder* safepoint_table_builder() {
return &safepoint_table_builder_;
}
+ TranslationArrayBuilder* translation_array_builder() {
+ return &translation_array_builder_;
+ }
SafepointTableBuilder safepoint_table_builder_;
+ TranslationArrayBuilder translation_array_builder_;
MaglevCodeGenState code_gen_state_;
GraphProcessor<MaglevCodeGeneratingNodeProcessor> processor_;
Graph* const graph_;
+
+ int deopt_exit_start_offset_ = -1;
};
// static
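
[Editor's note: a standalone check of the return_offset arithmetic in EmitLazyDeopt above, using the worked example from its comment (2 locals, 2 parameters, frame viewed in reverse as [acc, r1, r0, a1, a0]). Only the arithmetic mirrors the diff; the bool-flag interface is a simplification of interpreter::Register.]

    #include <cassert>

    // Offsets count from the end of the translated frame
    // [parameters..., locals..., accumulator], as in EmitLazyDeopt.
    int ReturnOffset(bool is_accumulator, bool is_parameter, int index,
                     int register_count, int parameter_count) {
      if (is_accumulator) return 0;
      if (is_parameter) return register_count + parameter_count - index;
      return register_count - index;
    }

    int main() {
      // Parameter a1 from the comment's example: 2 + 2 - 1 = 3.
      assert(ReturnOffset(false, true, 1, 2, 2) == 3);
      // A result in the accumulator sits at the very end: offset 0.
      assert(ReturnOffset(true, false, 0, 2, 2) == 0);
      // Local r0 is two slots from the end ([acc, r1, r0, ...]).
      assert(ReturnOffset(false, false, 0, 2, 2) == 2);
    }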
diff --git a/deps/v8/src/maglev/maglev-compilation-info.cc b/deps/v8/src/maglev/maglev-compilation-info.cc
index 630d341a66..0b018b1913 100644
--- a/deps/v8/src/maglev/maglev-compilation-info.cc
+++ b/deps/v8/src/maglev/maglev-compilation-info.cc
@@ -69,11 +69,17 @@ MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate,
zone()->New<compiler::CompilationDependencies>(broker(), zone());
USE(deps); // The deps register themselves in the heap broker.
+ // Heap broker initialization may already use IsPendingAllocation.
+ isolate->heap()->PublishPendingAllocations();
+
broker()->SetTargetNativeContextRef(
handle(function->native_context(), isolate));
broker()->InitializeAndStartSerializing();
broker()->StopSerializing();
+ // Serialization may have allocated.
+ isolate->heap()->PublishPendingAllocations();
+
toplevel_compilation_unit_ =
MaglevCompilationUnit::New(zone(), this, function);
}
diff --git a/deps/v8/src/maglev/maglev-compilation-info.h b/deps/v8/src/maglev/maglev-compilation-info.h
index 70490de218..d8d52402c8 100644
--- a/deps/v8/src/maglev/maglev-compilation-info.h
+++ b/deps/v8/src/maglev/maglev-compilation-info.h
@@ -63,9 +63,6 @@ class MaglevCompilationInfo final {
void set_graph(Graph* graph) { graph_ = graph; }
Graph* graph() const { return graph_; }
- void set_codet(MaybeHandle<CodeT> codet) { codet_ = codet; }
- MaybeHandle<CodeT> codet() const { return codet_; }
-
// Flag accessors (for thread-safe access to global flags).
// TODO(v8:7700): Consider caching these.
#define V(Name) \
@@ -103,9 +100,6 @@ class MaglevCompilationInfo final {
// Produced off-thread during ExecuteJobImpl.
Graph* graph_ = nullptr;
- // Produced during FinalizeJobImpl.
- MaybeHandle<CodeT> codet_;
-
#define V(Name) const bool Name##_;
MAGLEV_COMPILATION_FLAG_LIST(V)
#undef V
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.cc b/deps/v8/src/maglev/maglev-compilation-unit.cc
index f35f418de7..5662cecb41 100644
--- a/deps/v8/src/maglev/maglev-compilation-unit.cc
+++ b/deps/v8/src/maglev/maglev-compilation-unit.cc
@@ -6,6 +6,7 @@
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-compilation-info.h"
+#include "src/maglev/maglev-graph-labeller.h"
#include "src/objects/js-function-inl.h"
namespace v8 {
@@ -15,13 +16,14 @@ namespace maglev {
MaglevCompilationUnit::MaglevCompilationUnit(MaglevCompilationInfo* info,
Handle<JSFunction> function)
: info_(info),
- bytecode_(
- MakeRef(broker(), function->shared().GetBytecodeArray(isolate()))),
+ shared_function_info_(MakeRef(broker(), function->shared())),
+ bytecode_(shared_function_info_.GetBytecodeArray()),
feedback_(MakeRef(broker(), function->feedback_vector())),
bytecode_analysis_(bytecode_.object(), zone(), BytecodeOffset::None(),
true),
register_count_(bytecode_.register_count()),
- parameter_count_(bytecode_.parameter_count()) {}
+ parameter_count_(bytecode_.parameter_count()),
+ stack_value_repr_(info->zone()) {}
compiler::JSHeapBroker* MaglevCompilationUnit::broker() const {
return info_->broker();
@@ -40,6 +42,12 @@ MaglevGraphLabeller* MaglevCompilationUnit::graph_labeller() const {
return info_->graph_labeller();
}
+void MaglevCompilationUnit::RegisterNodeInGraphLabeller(const Node* node) {
+ if (has_graph_labeller()) {
+ graph_labeller()->RegisterNode(node);
+ }
+}
+
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.h b/deps/v8/src/maglev/maglev-compilation-unit.h
index 52e1a775d6..9060aba476 100644
--- a/deps/v8/src/maglev/maglev-compilation-unit.h
+++ b/deps/v8/src/maglev/maglev-compilation-unit.h
@@ -13,8 +13,10 @@ namespace v8 {
namespace internal {
namespace maglev {
+enum class ValueRepresentation;
class MaglevCompilationInfo;
class MaglevGraphLabeller;
+class Node;
// Per-unit data, i.e. once per top-level function and once per inlined
// function.
@@ -30,24 +32,43 @@ class MaglevCompilationUnit : public ZoneObject {
MaglevCompilationInfo* info() const { return info_; }
compiler::JSHeapBroker* broker() const;
Isolate* isolate() const;
+ LocalIsolate* local_isolate() const;
Zone* zone() const;
int register_count() const { return register_count_; }
int parameter_count() const { return parameter_count_; }
bool has_graph_labeller() const;
MaglevGraphLabeller* graph_labeller() const;
+ const compiler::SharedFunctionInfoRef& shared_function_info() const {
+ return shared_function_info_;
+ }
const compiler::BytecodeArrayRef& bytecode() const { return bytecode_; }
const compiler::FeedbackVectorRef& feedback() const { return feedback_; }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return bytecode_analysis_;
}
+ void RegisterNodeInGraphLabeller(const Node* node);
+
+ const ZoneVector<ValueRepresentation>& stack_value_repr() const {
+ return stack_value_repr_;
+ }
+
+ void push_stack_value_repr(ValueRepresentation r) {
+ stack_value_repr_.push_back(r);
+ }
+
private:
MaglevCompilationInfo* const info_;
+ const compiler::SharedFunctionInfoRef shared_function_info_;
const compiler::BytecodeArrayRef bytecode_;
const compiler::FeedbackVectorRef feedback_;
const compiler::BytecodeAnalysis bytecode_analysis_;
const int register_count_;
const int parameter_count_;
+
+ // TODO(victorgomes): Compress these values, if only tagged/untagged, we could
+ // use a binary vector? We might also want to deal with safepoints properly.
+ ZoneVector<ValueRepresentation> stack_value_repr_;
};
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-compiler.cc b/deps/v8/src/maglev/maglev-compiler.cc
index f4a23d869e..e1de2cc21b 100644
--- a/deps/v8/src/maglev/maglev-compiler.cc
+++ b/deps/v8/src/maglev/maglev-compiler.cc
@@ -31,6 +31,7 @@
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-graph-verifier.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
@@ -46,8 +47,6 @@ namespace maglev {
class NumberingProcessor {
public:
- static constexpr bool kNeedsCheckpointStates = false;
-
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) { node_id_ = 1; }
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
@@ -62,17 +61,21 @@ class NumberingProcessor {
class UseMarkingProcessor {
public:
- static constexpr bool kNeedsCheckpointStates = true;
-
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
- void Process(NodeBase* node, const ProcessingState& state) {
- if (node->properties().can_deopt()) MarkCheckpointNodes(node, state);
+ template <typename NodeT>
+ void Process(NodeT* node, const ProcessingState& state) {
+ if constexpr (NodeT::kProperties.can_eager_deopt()) {
+ MarkCheckpointNodes(node, node->eager_deopt_info(), state);
+ }
for (Input& input : *node) {
input.node()->mark_use(node->id(), &input);
}
+ if constexpr (NodeT::kProperties.can_lazy_deopt()) {
+ MarkCheckpointNodes(node, node->lazy_deopt_info(), state);
+ }
}
void Process(Phi* node, const ProcessingState& state) {
@@ -105,30 +108,40 @@ class UseMarkingProcessor {
}
private:
- void MarkCheckpointNodes(NodeBase* node, const ProcessingState& state) {
- const InterpreterFrameState* checkpoint_state =
- state.checkpoint_frame_state();
+ void MarkCheckpointNodes(NodeBase* node, const EagerDeoptInfo* deopt_info,
+ const ProcessingState& state) {
+ const CompactInterpreterFrameState* register_frame =
+ deopt_info->state.register_frame;
int use_id = node->id();
+ int index = 0;
- for (int i = 0; i < state.parameter_count(); i++) {
- interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
- ValueNode* node = checkpoint_state->get(reg);
- if (node) node->mark_use(use_id, nullptr);
- }
- for (int i = 0; i < state.register_count(); i++) {
- interpreter::Register reg = interpreter::Register(i);
- ValueNode* node = checkpoint_state->get(reg);
- if (node) node->mark_use(use_id, nullptr);
- }
- if (checkpoint_state->accumulator()) {
- checkpoint_state->accumulator()->mark_use(use_id, nullptr);
- }
+ register_frame->ForEachValue(
+ *state.compilation_unit(),
+ [&](ValueNode* node, interpreter::Register reg) {
+ node->mark_use(use_id, &deopt_info->input_locations[index++]);
+ });
+ }
+ void MarkCheckpointNodes(NodeBase* node, const LazyDeoptInfo* deopt_info,
+ const ProcessingState& state) {
+ const CompactInterpreterFrameState* register_frame =
+ deopt_info->state.register_frame;
+ int use_id = node->id();
+ int index = 0;
+
+ register_frame->ForEachValue(
+ *state.compilation_unit(),
+ [&](ValueNode* node, interpreter::Register reg) {
+ // Skip over the result location.
+ if (reg == deopt_info->result_location) return;
+ node->mark_use(use_id, &deopt_info->input_locations[index++]);
+ });
}
};
// static
-void MaglevCompiler::Compile(MaglevCompilationUnit* toplevel_compilation_unit) {
- MaglevCompiler compiler(toplevel_compilation_unit);
+void MaglevCompiler::Compile(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* toplevel_compilation_unit) {
+ MaglevCompiler compiler(local_isolate, toplevel_compilation_unit);
compiler.Compile();
}
@@ -142,7 +155,13 @@ void MaglevCompiler::Compile() {
new MaglevGraphLabeller());
}
- MaglevGraphBuilder graph_builder(toplevel_compilation_unit_);
+ // TODO(v8:7700): Support exceptions in maglev. We currently bail if exception
+ // handler table is non-empty.
+ if (toplevel_compilation_unit_->bytecode().handler_table_size() > 0) {
+ return;
+ }
+
+ MaglevGraphBuilder graph_builder(local_isolate(), toplevel_compilation_unit_);
graph_builder.Build();
@@ -156,6 +175,13 @@ void MaglevCompiler::Compile() {
PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
}
+#ifdef DEBUG
+ {
+ GraphProcessor<MaglevGraphVerifier> verifier(toplevel_compilation_unit_);
+ verifier.ProcessGraph(graph_builder.graph());
+ }
+#endif
+
{
GraphMultiProcessor<NumberingProcessor, UseMarkingProcessor,
MaglevVregAllocator>
@@ -184,11 +210,20 @@ void MaglevCompiler::Compile() {
MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
MaglevCompilationUnit* toplevel_compilation_unit) {
Graph* const graph = toplevel_compilation_unit->info()->graph();
- if (graph == nullptr) return {}; // Compilation failed.
+ if (graph == nullptr) {
+ // Compilation failed.
+ toplevel_compilation_unit->shared_function_info()
+ .object()
+ ->set_maglev_compilation_failed(true);
+ return {};
+ }
Handle<Code> code;
if (!MaglevCodeGenerator::Generate(toplevel_compilation_unit, graph)
.ToHandle(&code)) {
+ toplevel_compilation_unit->shared_function_info()
+ .object()
+ ->set_maglev_compilation_failed(true);
return {};
}
@@ -201,6 +236,7 @@ MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
}
Isolate* const isolate = toplevel_compilation_unit->isolate();
+ isolate->native_context()->AddOptimizedCode(ToCodeT(*code));
return ToCodeT(code, isolate);
}
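
[Editor's note: the templated UseMarkingProcessor::Process above replaces the old runtime can_deopt() check with per-node-type static properties and C++17 if constexpr, so the deopt-marking code is compiled only for node types that can actually deopt. A minimal sketch of that pattern; EagerDeoptNode and PlainNode are invented types, not V8 classes.]

    #include <cstdio>

    struct OpProperties {
      bool eager_deopt;
      constexpr bool can_eager_deopt() const { return eager_deopt; }
    };

    struct EagerDeoptNode {
      static constexpr OpProperties kProperties{true};
      void mark_deopt_uses() { std::printf("marking deopt inputs\n"); }
    };

    struct PlainNode {
      static constexpr OpProperties kProperties{false};
      // No mark_deopt_uses(): the discarded branch below is never
      // instantiated for this type, so this still compiles.
    };

    template <typename NodeT>
    void Process(NodeT* node) {
      if constexpr (NodeT::kProperties.can_eager_deopt()) {
        node->mark_deopt_uses();
      }
      std::printf("marking value inputs\n");
    }

    int main() {
      EagerDeoptNode a;
      PlainNode b;
      Process(&a);  // deopt inputs, then value inputs
      Process(&b);  // value inputs only
    }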
diff --git a/deps/v8/src/maglev/maglev-compiler.h b/deps/v8/src/maglev/maglev-compiler.h
index 79b71552d1..37fb5d0340 100644
--- a/deps/v8/src/maglev/maglev-compiler.h
+++ b/deps/v8/src/maglev/maglev-compiler.h
@@ -24,7 +24,8 @@ class Graph;
class MaglevCompiler {
public:
// May be called from any thread.
- static void Compile(MaglevCompilationUnit* toplevel_compilation_unit);
+ static void Compile(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* toplevel_compilation_unit);
// Called on the main thread after Compile has completed.
// TODO(v8:7700): Move this to a different class?
@@ -32,8 +33,10 @@ class MaglevCompiler {
MaglevCompilationUnit* toplevel_compilation_unit);
private:
- explicit MaglevCompiler(MaglevCompilationUnit* toplevel_compilation_unit)
- : toplevel_compilation_unit_(toplevel_compilation_unit) {}
+ explicit MaglevCompiler(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* toplevel_compilation_unit)
+ : local_isolate_(local_isolate),
+ toplevel_compilation_unit_(toplevel_compilation_unit) {}
void Compile();
@@ -41,8 +44,9 @@ class MaglevCompiler {
return toplevel_compilation_unit_->broker();
}
Zone* zone() { return toplevel_compilation_unit_->zone(); }
- Isolate* isolate() { return toplevel_compilation_unit_->isolate(); }
+ LocalIsolate* local_isolate() { return local_isolate_; }
+ LocalIsolate* const local_isolate_;
MaglevCompilationUnit* const toplevel_compilation_unit_;
};
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
index 762de2455a..0c001f4e5f 100644
--- a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
@@ -4,6 +4,7 @@
#include "src/maglev/maglev-concurrent-dispatcher.h"
+#include "src/codegen/compiler.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/isolate.h"
@@ -85,10 +86,8 @@ std::unique_ptr<MaglevCompilationJob> MaglevCompilationJob::New(
MaglevCompilationJob::MaglevCompilationJob(
std::unique_ptr<MaglevCompilationInfo>&& info)
- : OptimizedCompilationJob(nullptr, kMaglevCompilerName),
+ : OptimizedCompilationJob(kMaglevCompilerName, State::kReadyToPrepare),
info_(std::move(info)) {
- // TODO(jgruber, v8:7700): Remove the OptimizedCompilationInfo (which should
- // be renamed to TurbofanCompilationInfo) from OptimizedCompilationJob.
DCHECK(FLAG_maglev);
}
@@ -102,18 +101,26 @@ CompilationJob::Status MaglevCompilationJob::PrepareJobImpl(Isolate* isolate) {
CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
RuntimeCallStats* stats, LocalIsolate* local_isolate) {
LocalIsolateScope scope{info(), local_isolate};
- maglev::MaglevCompiler::Compile(info()->toplevel_compilation_unit());
+ maglev::MaglevCompiler::Compile(local_isolate,
+ info()->toplevel_compilation_unit());
// TODO(v8:7700): Actual return codes.
return CompilationJob::SUCCEEDED;
}
CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
- info()->set_codet(maglev::MaglevCompiler::GenerateCode(
- info()->toplevel_compilation_unit()));
- // TODO(v8:7700): Actual return codes.
+ Handle<CodeT> codet;
+ if (!maglev::MaglevCompiler::GenerateCode(info()->toplevel_compilation_unit())
+ .ToHandle(&codet)) {
+ return CompilationJob::FAILED;
+ }
+ info()->function()->set_code(*codet);
return CompilationJob::SUCCEEDED;
}
+Handle<JSFunction> MaglevCompilationJob::function() const {
+ return info_->function();
+}
+
// The JobTask is posted to V8::GetCurrentPlatform(). It's responsible for
// processing the incoming queue on a worker thread.
class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
@@ -134,8 +141,7 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
CHECK_EQ(status, CompilationJob::SUCCEEDED);
outgoing_queue()->Enqueue(std::move(job));
}
- // TODO(v8:7700):
- // isolate_->stack_guard()->RequestInstallMaglevCode();
+ isolate()->stack_guard()->RequestInstallMaglevCode();
}
size_t GetMaxConcurrency(size_t) const override {
@@ -180,12 +186,16 @@ void MaglevConcurrentDispatcher::EnqueueJob(
}
void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
+ HandleScope handle_scope(isolate_);
while (!outgoing_queue_.IsEmpty()) {
std::unique_ptr<MaglevCompilationJob> job;
outgoing_queue_.Dequeue(&job);
CompilationJob::Status status = job->FinalizeJob(isolate_);
- // TODO(v8:7700): Use the result.
- CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ // TODO(v8:7700): Use the result and check that the job succeeded
+ // once all the bytecodes are implemented.
+ if (status == CompilationJob::SUCCEEDED) {
+ Compiler::FinalizeMaglevCompilationJob(job.get(), isolate_);
+ }
}
}
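
[Editor's note: the dispatcher above moves each job through two queues: worker threads drain an incoming queue, compile, and push to an outgoing queue that the main thread drains in FinalizeFinishedJobs after RequestInstallMaglevCode fires. A minimal single-worker sketch of that handoff; std::queue plus a mutex stands in for V8's lock-free queues and stack-guard signalling, and all names are illustrative.]

    #include <cstdio>
    #include <mutex>
    #include <queue>
    #include <string>
    #include <thread>

    struct Job { std::string name; bool compiled = false; };

    std::mutex mu;
    std::queue<Job> incoming, outgoing;  // both guarded by mu

    void WorkerLoop() {  // plays the role of JobTask::Run
      for (;;) {
        Job job;
        {
          std::lock_guard<std::mutex> lock(mu);
          if (incoming.empty()) return;
          job = incoming.front();
          incoming.pop();
        }
        job.compiled = true;  // stands in for the off-thread ExecuteJob
        std::lock_guard<std::mutex> lock(mu);
        outgoing.push(job);
      }
    }

    void FinalizeFinishedJobs() {  // runs on the main thread
      std::lock_guard<std::mutex> lock(mu);
      while (!outgoing.empty()) {
        std::printf("finalizing %s\n", outgoing.front().name.c_str());
        outgoing.pop();
      }
    }

    int main() {
      incoming.push({"f"});
      incoming.push({"g"});
      std::thread worker(WorkerLoop);
      worker.join();  // the real main thread is notified asynchronously
      FinalizeFinishedJobs();
    }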
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
index 0b2a086e5a..fa0e40ac09 100644
--- a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
@@ -21,6 +21,13 @@ namespace maglev {
class MaglevCompilationInfo;
+// TODO(v8:7700): While basic infrastructure now exists, there are many TODOs
+// that should still be addressed soon:
+// - Full tracing support through --trace-opt.
+// - Concurrent codegen.
+// - Concurrent Code object creation (optional?).
+// - Test support for concurrency (see %FinalizeOptimization).
+
// Exports needed functionality without exposing implementation details.
class ExportedMaglevCompilationInfo final {
public:
@@ -47,6 +54,8 @@ class MaglevCompilationJob final : public OptimizedCompilationJob {
LocalIsolate* local_isolate) override;
Status FinalizeJobImpl(Isolate* isolate) override;
+ Handle<JSFunction> function() const;
+
private:
explicit MaglevCompilationJob(std::unique_ptr<MaglevCompilationInfo>&& info);
diff --git a/deps/v8/src/maglev/maglev-graph-builder.cc b/deps/v8/src/maglev/maglev-graph-builder.cc
index b38bece1d5..c7026214cc 100644
--- a/deps/v8/src/maglev/maglev-graph-builder.cc
+++ b/deps/v8/src/maglev/maglev-graph-builder.cc
@@ -4,12 +4,16 @@
#include "src/maglev/maglev-graph-builder.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/heap-refs.h"
+#include "src/compiler/processed-feedback.h"
#include "src/handles/maybe-handles-inl.h"
-#include "src/ic/handler-configuration.h"
+#include "src/ic/handler-configuration-inl.h"
+#include "src/maglev/maglev-ir.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/name-inl.h"
+#include "src/objects/property-cell.h"
#include "src/objects/slots-inl.h"
namespace v8 {
@@ -17,8 +21,22 @@ namespace internal {
namespace maglev {
-MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
- : compilation_unit_(compilation_unit),
+namespace {
+
+int LoadSimpleFieldHandler(FieldIndex field_index) {
+ int config = LoadHandler::KindBits::encode(LoadHandler::Kind::kField) |
+ LoadHandler::IsInobjectBits::encode(field_index.is_inobject()) |
+ LoadHandler::IsDoubleBits::encode(field_index.is_double()) |
+ LoadHandler::FieldIndexBits::encode(field_index.index());
+ return config;
+}
+
+} // namespace
+
+MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* compilation_unit)
+ : local_isolate_(local_isolate),
+ compilation_unit_(compilation_unit),
iterator_(bytecode().object()),
jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length())),
// Overallocate merge_states_ by one to allow always looking up the
@@ -69,8 +87,6 @@ MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
interpreter::Register new_target_or_generator_register =
bytecode().incoming_new_target_or_generator_register();
- const compiler::BytecodeLivenessState* liveness =
- bytecode_analysis().GetInLivenessFor(0);
int register_index = 0;
// TODO(leszeks): Don't emit if not needed.
ValueNode* undefined_value =
@@ -78,19 +94,16 @@ MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
if (new_target_or_generator_register.is_valid()) {
int new_target_index = new_target_or_generator_register.index();
for (; register_index < new_target_index; register_index++) {
- StoreRegister(interpreter::Register(register_index), undefined_value,
- liveness);
+ StoreRegister(interpreter::Register(register_index), undefined_value);
}
StoreRegister(
new_target_or_generator_register,
// TODO(leszeks): Expose in Graph.
- AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister),
- liveness);
+ AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister));
register_index++;
}
for (; register_index < register_count(); register_index++) {
- StoreRegister(interpreter::Register(register_index), undefined_value,
- liveness);
+ StoreRegister(interpreter::Register(register_index), undefined_value);
}
BasicBlock* first_block = CreateBlock<Jump>({}, &jump_targets_[0]);
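
[Editor's note: LoadSimpleFieldHandler above packs a field-load description into a Smi-sized handler word using V8's BitField encoders; the LoadField node later decodes the same bits to find the object offset. A round-trip sketch with an invented layout (2 kind bits, 1 in-object bit, 1 double bit, field index in the rest); the real LoadHandler layout differs.]

    #include <cassert>
    #include <cstdint>

    // Invented layout: [ field_index:28 | double:1 | inobject:1 | kind:2 ]
    constexpr uint32_t EncodeHandler(uint32_t kind, bool inobject,
                                     bool is_double, uint32_t field_index) {
      return kind | (uint32_t{inobject} << 2) | (uint32_t{is_double} << 3) |
             (field_index << 4);
    }
    constexpr uint32_t DecodeKind(uint32_t h) { return h & 0x3; }
    constexpr uint32_t DecodeFieldIndex(uint32_t h) { return h >> 4; }

    int main() {
      constexpr uint32_t kField = 1;  // stand-in for LoadHandler::Kind::kField
      uint32_t handler =
          EncodeHandler(kField, /*inobject=*/true, /*is_double=*/false,
                        /*field_index=*/7);
      assert(DecodeKind(handler) == kField);
      assert(DecodeFieldIndex(handler) == 7);
    }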
@@ -109,37 +122,48 @@ MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
#define MAGLEV_UNIMPLEMENTED_BYTECODE(Name) \
void MaglevGraphBuilder::Visit##Name() { MAGLEV_UNIMPLEMENTED(Name); }
-template <Operation kOperation, typename... Args>
-ValueNode* MaglevGraphBuilder::AddNewOperationNode(
- std::initializer_list<ValueNode*> inputs, Args&&... args) {
- switch (kOperation) {
-#define CASE(Name) \
- case Operation::k##Name: \
- return AddNewNode<Generic##Name>(inputs, std::forward<Args>(args)...);
- OPERATION_LIST(CASE)
-#undef CASE
- }
-}
+namespace {
+template <Operation kOperation>
+struct NodeForOperationHelper;
+
+#define NODE_FOR_OPERATION_HELPER(Name) \
+ template <> \
+ struct NodeForOperationHelper<Operation::k##Name> { \
+ using generic_type = Generic##Name; \
+ };
+OPERATION_LIST(NODE_FOR_OPERATION_HELPER)
+#undef NODE_FOR_OPERATION_HELPER
+
+template <Operation kOperation>
+using GenericNodeForOperation =
+ typename NodeForOperationHelper<kOperation>::generic_type;
+} // namespace
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericUnaryOperationNode() {
FeedbackSlot slot_index = GetSlotOperand(0);
- ValueNode* value = GetAccumulator();
- ValueNode* node = AddNewOperationNode<kOperation>(
- {value}, compiler::FeedbackSource{feedback(), slot_index});
- SetAccumulator(node);
- MarkPossibleSideEffect();
+ ValueNode* value = GetAccumulatorTaggedValue();
+ SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
+ {value}, compiler::FeedbackSource{feedback(), slot_index}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericBinaryOperationNode() {
- ValueNode* left = LoadRegister(0);
+ ValueNode* left = LoadRegisterTaggedValue(0);
+ ValueNode* right = GetAccumulatorTaggedValue();
+ FeedbackSlot slot_index = GetSlotOperand(1);
+ SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
+ {left, right}, compiler::FeedbackSource{feedback(), slot_index}));
+}
+
+template <Operation kOperation>
+void MaglevGraphBuilder::BuildGenericBinarySmiOperationNode() {
+ ValueNode* left = GetAccumulatorTaggedValue();
+ Smi constant = Smi::FromInt(iterator_.GetImmediateOperand(0));
+ ValueNode* right = AddNewNode<SmiConstant>({}, constant);
FeedbackSlot slot_index = GetSlotOperand(1);
- ValueNode* right = GetAccumulator();
- ValueNode* node = AddNewOperationNode<kOperation>(
- {left, right}, compiler::FeedbackSource{feedback(), slot_index});
- SetAccumulator(node);
- MarkPossibleSideEffect();
+ SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
+ {left, right}, compiler::FeedbackSource{feedback(), slot_index}));
}
template <Operation kOperation>
@@ -150,11 +174,69 @@ void MaglevGraphBuilder::VisitUnaryOperation() {
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinaryOperation() {
+ FeedbackNexus nexus = FeedbackNexusForOperand(1);
+
+ if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
+ if (nexus.kind() == FeedbackSlotKind::kBinaryOp) {
+ BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
+
+ if (hint == BinaryOperationHint::kSignedSmall) {
+ ValueNode *left, *right;
+ if (IsRegisterEqualToAccumulator(0)) {
+ left = right = LoadRegisterSmiUntaggedValue(0);
+ } else {
+ left = LoadRegisterSmiUntaggedValue(0);
+ right = GetAccumulatorSmiUntaggedValue();
+ }
+
+ if (kOperation == Operation::kAdd) {
+ SetAccumulator(AddNewNode<Int32AddWithOverflow>({left, right}));
+ return;
+ }
+ }
+ }
+ }
+
// TODO(victorgomes): Use feedback info and create optimized versions.
BuildGenericBinaryOperationNode<kOperation>();
}
-void MaglevGraphBuilder::VisitLdar() { SetAccumulator(LoadRegister(0)); }
+template <Operation kOperation>
+void MaglevGraphBuilder::VisitBinarySmiOperation() {
+ FeedbackNexus nexus = FeedbackNexusForOperand(1);
+
+ if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
+ if (nexus.kind() == FeedbackSlotKind::kBinaryOp) {
+ BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
+
+ if (hint == BinaryOperationHint::kSignedSmall) {
+ ValueNode* left = GetAccumulatorSmiUntaggedValue();
+ int32_t constant = iterator_.GetImmediateOperand(0);
+
+ if (kOperation == Operation::kAdd) {
+ if (constant == 0) {
+ // For addition of zero, when the accumulator passed the Smi check,
+ // it already has the right value, so we can just return.
+ return;
+ }
+ // TODO(victorgomes): We could create an Int32Add node that receives
+ // a constant and avoid a register move.
+ ValueNode* right = AddNewNode<Int32Constant>({}, constant);
+ SetAccumulator(AddNewNode<Int32AddWithOverflow>({left, right}));
+ return;
+ }
+ }
+ }
+ }
+
+ // TODO(victorgomes): Use feedback info and create optimized versions.
+ BuildGenericBinarySmiOperationNode<kOperation>();
+}
+
+void MaglevGraphBuilder::VisitLdar() {
+ MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0),
+ interpreter::Register::virtual_accumulator());
+}
void MaglevGraphBuilder::VisitLdaZero() {
SetAccumulator(AddNewNode<SmiConstant>({}, Smi::zero()));
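
[Editor's note: VisitBinaryOperation above only takes the Int32 fast path when the FeedbackNexus reports monomorphic kSignedSmall feedback, and otherwise falls back to the generic node; a speculative add that fails its checks eager-deopts back to the interpreter. A schematic sketch of that decision; the enums and emitted-node names are placeholders, not V8 API.]

    #include <cstdio>

    enum class ICState { kUninitialized, kMonomorphic, kMegamorphic };
    enum class BinaryHint { kSignedSmall, kNumber, kAny };

    void BuildAdd(ICState state, BinaryHint hint) {
      if (state == ICState::kMonomorphic && hint == BinaryHint::kSignedSmall) {
        // Untag both inputs with Smi checks, then add with an overflow
        // check; failure of either check triggers an eager deopt.
        std::printf("emit untagged inputs + Int32AddWithOverflow\n");
        return;
      }
      // Anything else calls into the generic IC path with the feedback slot.
      std::printf("emit GenericAdd(feedback_slot)\n");
    }

    int main() {
      BuildAdd(ICState::kMonomorphic, BinaryHint::kSignedSmall);  // fast path
      BuildAdd(ICState::kMegamorphic, BinaryHint::kAny);          // generic
    }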
@@ -178,20 +260,42 @@ void MaglevGraphBuilder::VisitLdaTrue() {
void MaglevGraphBuilder::VisitLdaFalse() {
SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kFalseValue));
}
-MAGLEV_UNIMPLEMENTED_BYTECODE(LdaConstant)
+void MaglevGraphBuilder::VisitLdaConstant() {
+ SetAccumulator(GetConstant(GetRefOperand<HeapObject>(0)));
+}
MAGLEV_UNIMPLEMENTED_BYTECODE(LdaContextSlot)
MAGLEV_UNIMPLEMENTED_BYTECODE(LdaImmutableContextSlot)
-MAGLEV_UNIMPLEMENTED_BYTECODE(LdaCurrentContextSlot)
-MAGLEV_UNIMPLEMENTED_BYTECODE(LdaImmutableCurrentContextSlot)
-void MaglevGraphBuilder::VisitStar() {
- StoreRegister(
- iterator_.GetRegisterOperand(0), GetAccumulator(),
- bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
+void MaglevGraphBuilder::VisitLdaCurrentContextSlot() {
+ ValueNode* context = GetContext();
+ int slot_index = iterator_.GetIndexOperand(0);
+
+ // TODO(leszeks): Passing a LoadHandler to LoadField here is a bit of
+ // a hack, maybe we should have a LoadRawOffset or similar.
+ SetAccumulator(AddNewNode<LoadField>(
+ {context},
+ LoadSimpleFieldHandler(FieldIndex::ForInObjectOffset(
+ Context::OffsetOfElementAt(slot_index), FieldIndex::kTagged))));
}
+void MaglevGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
+ // TODO(leszeks): Consider context specialising.
+ VisitLdaCurrentContextSlot();
+}
+void MaglevGraphBuilder::VisitStar() {
+ MoveNodeBetweenRegisters(interpreter::Register::virtual_accumulator(),
+ iterator_.GetRegisterOperand(0));
+}
+#define SHORT_STAR_VISITOR(Name, ...) \
+ void MaglevGraphBuilder::Visit##Name() { \
+ MoveNodeBetweenRegisters( \
+ interpreter::Register::virtual_accumulator(), \
+ interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name)); \
+ }
+SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
+#undef SHORT_STAR_VISITOR
+
void MaglevGraphBuilder::VisitMov() {
- StoreRegister(
- iterator_.GetRegisterOperand(1), LoadRegister(0),
- bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
+ MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0),
+ iterator_.GetRegisterOperand(1));
}
MAGLEV_UNIMPLEMENTED_BYTECODE(PushContext)
MAGLEV_UNIMPLEMENTED_BYTECODE(PopContext)
@@ -200,6 +304,56 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(TestUndetectable)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestNull)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestUndefined)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestTypeOf)
+
+void MaglevGraphBuilder::BuildPropertyCellAccess(
+ const compiler::PropertyCellRef& property_cell) {
+ // TODO(leszeks): A bunch of this is copied from
+ // js-native-context-specialization.cc -- I wonder if we can unify it
+ // somehow.
+ bool was_cached = property_cell.Cache();
+ CHECK(was_cached);
+
+ compiler::ObjectRef property_cell_value = property_cell.value();
+ if (property_cell_value.IsTheHole()) {
+ // The property cell is no longer valid.
+ EmitUnconditionalDeopt();
+ return;
+ }
+
+ PropertyDetails property_details = property_cell.property_details();
+ PropertyCellType property_cell_type = property_details.cell_type();
+ DCHECK_EQ(PropertyKind::kData, property_details.kind());
+
+ if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
+ SetAccumulator(GetConstant(property_cell_value));
+ return;
+ }
+
+ // Record a code dependency on the cell if we can benefit from the
+ // additional feedback, or the global property is configurable (i.e.
+ // can be deleted or reconfigured to an accessor property).
+ if (property_cell_type != PropertyCellType::kMutable ||
+ property_details.IsConfigurable()) {
+ broker()->dependencies()->DependOnGlobalProperty(property_cell);
+ }
+
+ // Load from constant/undefined global property can be constant-folded.
+ if (property_cell_type == PropertyCellType::kConstant ||
+ property_cell_type == PropertyCellType::kUndefined) {
+ SetAccumulator(GetConstant(property_cell_value));
+ return;
+ }
+
+ ValueNode* property_cell_node =
+ AddNewNode<Constant>({}, property_cell.AsHeapObject());
+ // TODO(leszeks): Passing a LoadHandler to LoadField here is a bit of
+ // a hack, maybe we should have a LoadRawOffset or similar.
+ SetAccumulator(AddNewNode<LoadField>(
+ {property_cell_node},
+ LoadSimpleFieldHandler(FieldIndex::ForInObjectOffset(
+ PropertyCell::kValueOffset, FieldIndex::kTagged))));
+}
+
void MaglevGraphBuilder::VisitLdaGlobal() {
// LdaGlobal <name_index> <slot>
@@ -207,13 +361,26 @@ void MaglevGraphBuilder::VisitLdaGlobal() {
static const int kSlotOperandIndex = 1;
compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex);
- FeedbackSlot slot_index = GetSlotOperand(kSlotOperandIndex);
- ValueNode* context = GetContext();
+ const compiler::ProcessedFeedback& access_feedback =
+ broker()->GetFeedbackForGlobalAccess(compiler::FeedbackSource(
+ feedback(), GetSlotOperand(kSlotOperandIndex)));
+
+ if (access_feedback.IsInsufficient()) {
+ EmitUnconditionalDeopt();
+ return;
+ }
+
+ const compiler::GlobalAccessFeedback& global_access_feedback =
+ access_feedback.AsGlobalAccess();
- USE(slot_index); // TODO(v8:7700): Use the feedback info.
+ if (global_access_feedback.IsPropertyCell()) {
+ BuildPropertyCellAccess(global_access_feedback.property_cell());
+ } else {
+ // TODO(leszeks): Handle the IsScriptContextSlot case.
- SetAccumulator(AddNewNode<LoadGlobal>({context}, name));
- MarkPossibleSideEffect();
+ ValueNode* context = GetContext();
+ SetAccumulator(AddNewNode<LoadGlobal>({context}, name));
+ }
}
MAGLEV_UNIMPLEMENTED_BYTECODE(LdaGlobalInsideTypeof)
MAGLEV_UNIMPLEMENTED_BYTECODE(StaGlobal)
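
[Editor's note: BuildPropertyCellAccess above is a decision ladder: deopt if the cell holds the hole, constant-fold read-only non-configurable and constant/undefined cells, record a code dependency whenever the cell could still change shape, and otherwise emit a real load of the cell's value slot. A condensed sketch of that ladder; the struct and cell types are stand-ins for V8's PropertyCellRef and PropertyDetails.]

    #include <cstdio>

    enum class CellType { kConstant, kUndefined, kMutable };

    struct PropertyCell {
      bool is_hole;
      bool read_only;
      bool configurable;
      CellType type;
    };

    void BuildGlobalLoad(const PropertyCell& cell) {
      if (cell.is_hole) {
        std::printf("unconditional deopt: cell invalidated\n");
        return;
      }
      if (!cell.configurable && cell.read_only) {
        std::printf("constant-fold cell value\n");
        return;
      }
      if (cell.type != CellType::kMutable || cell.configurable) {
        // Code compiled against this cell is invalidated if it changes.
        std::printf("record DependOnGlobalProperty\n");
      }
      if (cell.type == CellType::kConstant ||
          cell.type == CellType::kUndefined) {
        std::printf("constant-fold cell value\n");
        return;
      }
      std::printf("emit LoadField(cell, kValueOffset)\n");
    }

    int main() {
      BuildGlobalLoad({false, false, false, CellType::kMutable});  // real load
      BuildGlobalLoad({false, true, false, CellType::kConstant});  // folded
    }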
@@ -228,35 +395,51 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(LdaLookupGlobalSlotInsideTypeof)
MAGLEV_UNIMPLEMENTED_BYTECODE(StaLookupSlot)
void MaglevGraphBuilder::VisitGetNamedProperty() {
// GetNamedProperty <object> <name_index> <slot>
- ValueNode* object = LoadRegister(0);
- FeedbackNexus nexus = feedback_nexus(2);
-
- if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
- EnsureCheckpoint();
- AddNewNode<SoftDeopt>({});
- } else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
- std::vector<MapAndHandler> maps_and_handlers;
- nexus.ExtractMapsAndHandlers(&maps_and_handlers);
- DCHECK_EQ(maps_and_handlers.size(), 1);
- MapAndHandler& map_and_handler = maps_and_handlers[0];
- if (map_and_handler.second->IsSmi()) {
- int handler = map_and_handler.second->ToSmi().value();
- LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler);
- if (kind == LoadHandler::Kind::kField &&
- !LoadHandler::IsWasmStructBits::decode(handler)) {
- EnsureCheckpoint();
- AddNewNode<CheckMaps>({object},
- MakeRef(broker(), map_and_handler.first));
- SetAccumulator(AddNewNode<LoadField>({object}, handler));
- return;
+ ValueNode* object = LoadRegisterTaggedValue(0);
+ compiler::NameRef name = GetRefOperand<Name>(1);
+ FeedbackSlot slot = GetSlotOperand(2);
+ compiler::FeedbackSource feedback_source{feedback(), slot};
+
+ const compiler::ProcessedFeedback& processed_feedback =
+ broker()->GetFeedbackForPropertyAccess(feedback_source,
+ compiler::AccessMode::kLoad, name);
+
+ switch (processed_feedback.kind()) {
+ case compiler::ProcessedFeedback::kInsufficient:
+ EmitUnconditionalDeopt();
+ return;
+
+ case compiler::ProcessedFeedback::kNamedAccess: {
+ const compiler::NamedAccessFeedback& named_feedback =
+ processed_feedback.AsNamedAccess();
+ if (named_feedback.maps().size() == 1) {
+ // Monomorphic load, check the handler.
+ // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
+ MaybeObjectHandle handler =
+ FeedbackNexusForSlot(slot).FindHandlerForMap(
+ named_feedback.maps()[0].object());
+ if (!handler.is_null() && handler->IsSmi()) {
+ // Smi handler, emit a map check and LoadField.
+ int smi_handler = handler->ToSmi().value();
+ LoadHandler::Kind kind = LoadHandler::KindBits::decode(smi_handler);
+ if (kind == LoadHandler::Kind::kField &&
+ !LoadHandler::IsWasmStructBits::decode(smi_handler)) {
+ AddNewNode<CheckMaps>({object}, named_feedback.maps()[0]);
+ SetAccumulator(AddNewNode<LoadField>({object}, smi_handler));
+ return;
+ }
+ }
}
- }
+ } break;
+
+ default:
+ break;
}
+ // Create a generic load in the fallthrough.
ValueNode* context = GetContext();
- compiler::NameRef name = GetRefOperand<Name>(1);
- SetAccumulator(AddNewNode<LoadNamedGeneric>({context, object}, name));
- MarkPossibleSideEffect();
+ SetAccumulator(
+ AddNewNode<LoadNamedGeneric>({context, object}, name, feedback_source));
}
MAGLEV_UNIMPLEMENTED_BYTECODE(GetNamedPropertyFromSuper)
@@ -266,29 +449,44 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(StaModuleVariable)
void MaglevGraphBuilder::VisitSetNamedProperty() {
// SetNamedProperty <object> <name_index> <slot>
- ValueNode* object = LoadRegister(0);
- FeedbackNexus nexus = feedback_nexus(2);
-
- if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
- EnsureCheckpoint();
- AddNewNode<SoftDeopt>({});
- } else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
- std::vector<MapAndHandler> maps_and_handlers;
- nexus.ExtractMapsAndHandlers(&maps_and_handlers);
- DCHECK_EQ(maps_and_handlers.size(), 1);
- MapAndHandler& map_and_handler = maps_and_handlers[0];
- if (map_and_handler.second->IsSmi()) {
- int handler = map_and_handler.second->ToSmi().value();
- StoreHandler::Kind kind = StoreHandler::KindBits::decode(handler);
- if (kind == StoreHandler::Kind::kField) {
- EnsureCheckpoint();
- AddNewNode<CheckMaps>({object},
- MakeRef(broker(), map_and_handler.first));
- ValueNode* value = GetAccumulator();
- AddNewNode<StoreField>({object, value}, handler);
- return;
+ ValueNode* object = LoadRegisterTaggedValue(0);
+ compiler::NameRef name = GetRefOperand<Name>(1);
+ FeedbackSlot slot = GetSlotOperand(2);
+ compiler::FeedbackSource feedback_source{feedback(), slot};
+
+ const compiler::ProcessedFeedback& processed_feedback =
+ broker()->GetFeedbackForPropertyAccess(
+ feedback_source, compiler::AccessMode::kStore, name);
+
+ switch (processed_feedback.kind()) {
+ case compiler::ProcessedFeedback::kInsufficient:
+ EmitUnconditionalDeopt();
+ return;
+
+ case compiler::ProcessedFeedback::kNamedAccess: {
+ const compiler::NamedAccessFeedback& named_feedback =
+ processed_feedback.AsNamedAccess();
+ if (named_feedback.maps().size() == 1) {
+ // Monomorphic store, check the handler.
+ // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
+ MaybeObjectHandle handler =
+ FeedbackNexusForSlot(slot).FindHandlerForMap(
+ named_feedback.maps()[0].object());
+ if (!handler.is_null() && handler->IsSmi()) {
+ int smi_handler = handler->ToSmi().value();
+ StoreHandler::Kind kind = StoreHandler::KindBits::decode(smi_handler);
+ if (kind == StoreHandler::Kind::kField) {
+ AddNewNode<CheckMaps>({object}, named_feedback.maps()[0]);
+ ValueNode* value = GetAccumulatorTaggedValue();
+ AddNewNode<StoreField>({object, value}, smi_handler);
+ return;
+ }
+ }
}
- }
+ } break;
+
+ default:
+ break;
}
// TODO(victorgomes): Generic store.
@@ -337,18 +535,42 @@ void MaglevGraphBuilder::VisitShiftRightLogical() {
VisitBinaryOperation<Operation::kShiftRightLogical>();
}
-MAGLEV_UNIMPLEMENTED_BYTECODE(AddSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(SubSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(MulSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(DivSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ModSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ExpSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseOrSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseXorSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseAndSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftLeftSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightSmi)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightLogicalSmi)
+void MaglevGraphBuilder::VisitAddSmi() {
+ VisitBinarySmiOperation<Operation::kAdd>();
+}
+void MaglevGraphBuilder::VisitSubSmi() {
+ VisitBinarySmiOperation<Operation::kSubtract>();
+}
+void MaglevGraphBuilder::VisitMulSmi() {
+ VisitBinarySmiOperation<Operation::kMultiply>();
+}
+void MaglevGraphBuilder::VisitDivSmi() {
+ VisitBinarySmiOperation<Operation::kDivide>();
+}
+void MaglevGraphBuilder::VisitModSmi() {
+ VisitBinarySmiOperation<Operation::kModulus>();
+}
+void MaglevGraphBuilder::VisitExpSmi() {
+ VisitBinarySmiOperation<Operation::kExponentiate>();
+}
+void MaglevGraphBuilder::VisitBitwiseOrSmi() {
+ VisitBinarySmiOperation<Operation::kBitwiseOr>();
+}
+void MaglevGraphBuilder::VisitBitwiseXorSmi() {
+ VisitBinarySmiOperation<Operation::kBitwiseXor>();
+}
+void MaglevGraphBuilder::VisitBitwiseAndSmi() {
+ VisitBinarySmiOperation<Operation::kBitwiseAnd>();
+}
+void MaglevGraphBuilder::VisitShiftLeftSmi() {
+ VisitBinarySmiOperation<Operation::kShiftLeft>();
+}
+void MaglevGraphBuilder::VisitShiftRightSmi() {
+ VisitBinarySmiOperation<Operation::kShiftRight>();
+}
+void MaglevGraphBuilder::VisitShiftRightLogicalSmi() {
+ VisitBinarySmiOperation<Operation::kShiftRightLogical>();
+}
void MaglevGraphBuilder::VisitInc() {
VisitUnaryOperation<Operation::kIncrement>();
@@ -369,59 +591,95 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(TypeOf)
MAGLEV_UNIMPLEMENTED_BYTECODE(DeletePropertyStrict)
MAGLEV_UNIMPLEMENTED_BYTECODE(DeletePropertySloppy)
MAGLEV_UNIMPLEMENTED_BYTECODE(GetSuperConstructor)
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallAnyReceiver)
-// TODO(leszeks): For all of these:
-// a) Read feedback and implement inlining
-// b) Wrap in a helper.
-void MaglevGraphBuilder::VisitCallProperty() {
- ValueNode* function = LoadRegister(0);
+// TODO(v8:7700): Read feedback and implement inlining
+void MaglevGraphBuilder::BuildCallFromRegisterList(
+ ConvertReceiverMode receiver_mode) {
+ ValueNode* function = LoadRegisterTaggedValue(0);
interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
ValueNode* context = GetContext();
- static constexpr int kTheContext = 1;
- CallProperty* call_property = AddNewNode<CallProperty>(
- args.register_count() + kTheContext, function, context);
- // TODO(leszeks): Move this for loop into the CallProperty constructor,
- // pre-size the args array.
+ size_t input_count = args.register_count() + Call::kFixedInputCount;
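+  // Call::kFixedInputCount covers the Call node's non-argument inputs,
+  // i.e. the callee function and the context passed below.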
+
+ RootConstant* undefined_constant;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // The undefined constant node has to be created before the call node.
+ undefined_constant =
+ AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue);
+ input_count++;
+ }
+
+ Call* call = AddNewNode<Call>(input_count, receiver_mode, function, context);
+ int arg_index = 0;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ call->set_arg(arg_index++, undefined_constant);
+ }
for (int i = 0; i < args.register_count(); ++i) {
- call_property->set_arg(i, current_interpreter_frame_.get(args[i]));
+ call->set_arg(arg_index++, current_interpreter_frame_.get(args[i]));
}
- SetAccumulator(call_property);
- MarkPossibleSideEffect();
+
+ SetAccumulator(call);
}
-void MaglevGraphBuilder::VisitCallProperty0() {
- ValueNode* function = LoadRegister(0);
+
+void MaglevGraphBuilder::BuildCallFromRegisters(
+ int argc_count, ConvertReceiverMode receiver_mode) {
+ DCHECK_LE(argc_count, 2);
+ ValueNode* function = LoadRegisterTaggedValue(0);
ValueNode* context = GetContext();
- CallProperty* call_property =
- AddNewNode<CallProperty>({function, context, LoadRegister(1)});
- SetAccumulator(call_property);
- MarkPossibleSideEffect();
+ int argc_count_with_recv = argc_count + 1;
+ size_t input_count = argc_count_with_recv + Call::kFixedInputCount;
+
+ // The undefined constant node has to be created before the call node.
+ RootConstant* undefined_constant;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ undefined_constant =
+ AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue);
+ }
+
+ Call* call = AddNewNode<Call>(input_count, receiver_mode, function, context);
+ int arg_index = 0;
+ int reg_count = argc_count_with_recv;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ reg_count = argc_count;
+ call->set_arg(arg_index++, undefined_constant);
+ }
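+  // With an undefined receiver the register operands hold only the
+  // arguments, so one fewer register is read and the undefined constant
+  // above fills the receiver slot instead.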
+ for (int i = 0; i < reg_count; i++) {
+ call->set_arg(arg_index++, LoadRegisterTaggedValue(i + 1));
+ }
+
+ SetAccumulator(call);
}
-void MaglevGraphBuilder::VisitCallProperty1() {
- ValueNode* function = LoadRegister(0);
- ValueNode* context = GetContext();
- CallProperty* call_property = AddNewNode<CallProperty>(
- {function, context, LoadRegister(1), LoadRegister(2)});
- SetAccumulator(call_property);
- MarkPossibleSideEffect();
+void MaglevGraphBuilder::VisitCallAnyReceiver() {
+ BuildCallFromRegisterList(ConvertReceiverMode::kAny);
+}
+void MaglevGraphBuilder::VisitCallProperty() {
+ BuildCallFromRegisterList(ConvertReceiverMode::kNotNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallProperty0() {
+ BuildCallFromRegisters(0, ConvertReceiverMode::kNotNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallProperty1() {
+ BuildCallFromRegisters(1, ConvertReceiverMode::kNotNullOrUndefined);
}
void MaglevGraphBuilder::VisitCallProperty2() {
- ValueNode* function = LoadRegister(0);
- ValueNode* context = GetContext();
-
- CallProperty* call_property = AddNewNode<CallProperty>(
- {function, context, LoadRegister(1), LoadRegister(2), LoadRegister(3)});
- SetAccumulator(call_property);
- MarkPossibleSideEffect();
+ BuildCallFromRegisters(2, ConvertReceiverMode::kNotNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallUndefinedReceiver() {
+ BuildCallFromRegisterList(ConvertReceiverMode::kNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallUndefinedReceiver0() {
+ BuildCallFromRegisters(0, ConvertReceiverMode::kNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallUndefinedReceiver1() {
+ BuildCallFromRegisters(1, ConvertReceiverMode::kNullOrUndefined);
+}
+void MaglevGraphBuilder::VisitCallUndefinedReceiver2() {
+ BuildCallFromRegisters(2, ConvertReceiverMode::kNullOrUndefined);
}
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver)
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver0)
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver1)
-MAGLEV_UNIMPLEMENTED_BYTECODE(CallUndefinedReceiver2)
+
MAGLEV_UNIMPLEMENTED_BYTECODE(CallWithSpread)
MAGLEV_UNIMPLEMENTED_BYTECODE(CallRuntime)
MAGLEV_UNIMPLEMENTED_BYTECODE(CallRuntimeForPair)
@@ -429,9 +687,13 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(CallJSRuntime)
MAGLEV_UNIMPLEMENTED_BYTECODE(InvokeIntrinsic)
MAGLEV_UNIMPLEMENTED_BYTECODE(Construct)
MAGLEV_UNIMPLEMENTED_BYTECODE(ConstructWithSpread)
-MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqual)
-MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqualStrict)
+void MaglevGraphBuilder::VisitTestEqual() {
+ VisitBinaryOperation<Operation::kEqual>();
+}
+void MaglevGraphBuilder::VisitTestEqualStrict() {
+ VisitBinaryOperation<Operation::kStrictEqual>();
+}
void MaglevGraphBuilder::VisitTestLessThan() {
VisitBinaryOperation<Operation::kLessThan>();
}
@@ -531,8 +793,6 @@ void MaglevGraphBuilder::MergeIntoFrameState(BasicBlock* predecessor,
void MaglevGraphBuilder::BuildBranchIfTrue(ValueNode* node, int true_target,
int false_target) {
- // TODO(verwaest): Materialize true/false in the respective environments.
- if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
BasicBlock* block = FinishBlock<BranchIfTrue>(next_offset(), {node},
&jump_targets_[true_target],
&jump_targets_[false_target]);
@@ -541,27 +801,25 @@ void MaglevGraphBuilder::BuildBranchIfTrue(ValueNode* node, int true_target,
void MaglevGraphBuilder::BuildBranchIfToBooleanTrue(ValueNode* node,
int true_target,
int false_target) {
- // TODO(verwaest): Materialize true/false in the respective environments.
- if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
BasicBlock* block = FinishBlock<BranchIfToBooleanTrue>(
next_offset(), {node}, &jump_targets_[true_target],
&jump_targets_[false_target]);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
}
void MaglevGraphBuilder::VisitJumpIfToBooleanTrue() {
- BuildBranchIfToBooleanTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
- next_offset());
+ BuildBranchIfToBooleanTrue(GetAccumulatorTaggedValue(),
+ iterator_.GetJumpTargetOffset(), next_offset());
}
void MaglevGraphBuilder::VisitJumpIfToBooleanFalse() {
- BuildBranchIfToBooleanTrue(GetAccumulator(), next_offset(),
+ BuildBranchIfToBooleanTrue(GetAccumulatorTaggedValue(), next_offset(),
iterator_.GetJumpTargetOffset());
}
void MaglevGraphBuilder::VisitJumpIfTrue() {
- BuildBranchIfTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
- next_offset());
+ BuildBranchIfTrue(GetAccumulatorTaggedValue(),
+ iterator_.GetJumpTargetOffset(), next_offset());
}
void MaglevGraphBuilder::VisitJumpIfFalse() {
- BuildBranchIfTrue(GetAccumulator(), next_offset(),
+ BuildBranchIfTrue(GetAccumulatorTaggedValue(), next_offset(),
iterator_.GetJumpTargetOffset());
}
MAGLEV_UNIMPLEMENTED_BYTECODE(JumpIfNull)
@@ -580,7 +838,7 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(SetPendingMessage)
MAGLEV_UNIMPLEMENTED_BYTECODE(Throw)
MAGLEV_UNIMPLEMENTED_BYTECODE(ReThrow)
void MaglevGraphBuilder::VisitReturn() {
- FinishBlock<Return>(next_offset(), {GetAccumulator()});
+ FinishBlock<Return>(next_offset(), {GetAccumulatorTaggedValue()});
}
MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowReferenceErrorIfHole)
MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowSuperNotCalledIfHole)
@@ -593,15 +851,6 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(GetIterator)
MAGLEV_UNIMPLEMENTED_BYTECODE(Debugger)
MAGLEV_UNIMPLEMENTED_BYTECODE(IncBlockCounter)
MAGLEV_UNIMPLEMENTED_BYTECODE(Abort)
-#define SHORT_STAR_VISITOR(Name, ...) \
- void MaglevGraphBuilder::Visit##Name() { \
- StoreRegister( \
- interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name), \
- GetAccumulator(), \
- bytecode_analysis().GetOutLivenessFor(iterator_.current_offset())); \
- }
-SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
-#undef SHORT_STAR_VISITOR
void MaglevGraphBuilder::VisitWide() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitExtraWide() { UNREACHABLE(); }
diff --git a/deps/v8/src/maglev/maglev-graph-builder.h b/deps/v8/src/maglev/maglev-graph-builder.h
index da86b80841..80fe3df504 100644
--- a/deps/v8/src/maglev/maglev-graph-builder.h
+++ b/deps/v8/src/maglev/maglev-graph-builder.h
@@ -7,10 +7,12 @@
#include <type_traits>
+#include "src/base/optional.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph.h"
@@ -23,7 +25,8 @@ namespace maglev {
class MaglevGraphBuilder {
public:
- explicit MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit);
+ explicit MaglevGraphBuilder(LocalIsolate* local_isolate,
+ MaglevCompilationUnit* compilation_unit);
void Build() {
for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) {
@@ -31,6 +34,14 @@ class MaglevGraphBuilder {
// TODO(v8:7700): Clean up after all bytecodes are supported.
if (found_unsupported_bytecode()) break;
}
+
+    // When merging InterpreterFrameStates at merge points, we might emit
+    // CheckedSmiTags and add them unsafely to the basic blocks. This
+    // addition might break a list invariant (namely `tail_` might not point
+    // to the last element). We revalidate this invariant here for all basic
+    // blocks.
+ for (BasicBlock* block : *graph_) {
+ block->nodes().RevalidateTail();
+ }
}
Graph* graph() const { return graph_; }
@@ -61,6 +72,11 @@ class MaglevGraphBuilder {
BasicBlockRef* old_jump_targets = jump_targets_[offset].Reset();
while (old_jump_targets != nullptr) {
BasicBlock* predecessor = merge_state.predecessor_at(predecessor_index);
+ if (predecessor == nullptr) {
+ // We can have null predecessors if the predecessor is dead.
+ predecessor_index--;
+ continue;
+ }
ControlNode* control = predecessor->control_node();
if (control->Is<ConditionalControlNode>()) {
// CreateEmptyBlock automatically registers itself with the offset.
@@ -94,11 +110,60 @@ class MaglevGraphBuilder {
}
}
+  // Returns true if the given offset is a merge point, i.e. there are jumps
+  // targeting it.
+ bool IsOffsetAMergePoint(int offset) {
+ return merge_states_[offset] != nullptr;
+ }
+
+ // Called when a block is killed by an unconditional eager deopt.
+ void EmitUnconditionalDeopt() {
+    // Create a block rather than calling FinishBlock, since we don't yet
+    // know the next block's offset; it only becomes known once the loop
+    // below has skipped the remaining bytecodes.
+ BasicBlock* block = CreateBlock<Deopt>({});
+ ResolveJumpsToBlockAtOffset(block, block_offset_);
+
+ // Skip any bytecodes remaining in the block, up to the next merge point.
+ while (!IsOffsetAMergePoint(iterator_.next_offset())) {
+ iterator_.Advance();
+ if (iterator_.done()) break;
+ }
+
+ // If there is control flow out of this block, we need to kill the merges
+ // into the control flow targets.
+ interpreter::Bytecode bytecode = iterator_.current_bytecode();
+ if (interpreter::Bytecodes::IsForwardJump(bytecode)) {
+ // Jumps merge into their target, and conditional jumps also merge into
+ // the fallthrough.
+ merge_states_[iterator_.GetJumpTargetOffset()]->MergeDead();
+ if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
+ merge_states_[iterator_.next_offset()]->MergeDead();
+ }
+ } else if (bytecode == interpreter::Bytecode::kJumpLoop) {
+ // JumpLoop merges into its loop header, which has to be treated specially
+      // by the merge.
+ merge_states_[iterator_.GetJumpTargetOffset()]->MergeDeadLoop();
+ } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
+ // Switches merge into their targets, and into the fallthrough.
+ for (auto offset : iterator_.GetJumpTableTargetOffsets()) {
+ merge_states_[offset.target_offset]->MergeDead();
+ }
+ merge_states_[iterator_.next_offset()]->MergeDead();
+ } else if (!interpreter::Bytecodes::Returns(bytecode) &&
+ !interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
+ // Any other bytecode that doesn't return or throw will merge into the
+ // fallthrough.
+ merge_states_[iterator_.next_offset()]->MergeDead();
+ }
+ }
+
void VisitSingleBytecode() {
int offset = iterator_.current_offset();
if (V8_UNLIKELY(merge_states_[offset] != nullptr)) {
if (current_block_ != nullptr) {
- DCHECK(!current_block_->nodes().is_empty());
+ // TODO(leszeks): Re-evaluate this DCHECK, we might hit it if the only
+        // bytecodes in this basic block were register juggling.
+ // DCHECK(!current_block_->nodes().is_empty());
FinishBlock<Jump>(offset, {}, &jump_targets_[offset]);
merge_states_[offset]->Merge(*compilation_unit_,
@@ -109,6 +174,10 @@ class MaglevGraphBuilder {
StartNewBlock(offset);
}
DCHECK_NOT_NULL(current_block_);
+#ifdef DEBUG
+    // Clear new nodes for the next VisitFoo.
+ new_nodes_.clear();
+#endif
switch (iterator_.current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
@@ -125,37 +194,41 @@ class MaglevGraphBuilder {
template <typename NodeT>
NodeT* AddNode(NodeT* node) {
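+    // Nodes that are required even when their result is unused are assumed
+    // to have side effects, so conservatively invalidate the cached
+    // checkpoint state for them.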
+ if (node->properties().is_required_when_unused()) {
+ MarkPossibleSideEffect();
+ }
current_block_->nodes().Add(node);
- return node;
- }
-
- template <typename NodeT, typename... Args>
- NodeT* NewNode(size_t input_count, Args&&... args) {
- NodeT* node =
- Node::New<NodeT>(zone(), input_count, std::forward<Args>(args)...);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
+#ifdef DEBUG
+ new_nodes_.insert(node);
+#endif
return node;
}
- template <Operation kOperation, typename... Args>
- ValueNode* AddNewOperationNode(std::initializer_list<ValueNode*> inputs,
- Args&&... args);
-
template <typename NodeT, typename... Args>
NodeT* AddNewNode(size_t input_count, Args&&... args) {
- return AddNode(NewNode<NodeT>(input_count, std::forward<Args>(args)...));
+ return AddNode(
+ CreateNewNode<NodeT>(input_count, std::forward<Args>(args)...));
}
template <typename NodeT, typename... Args>
- NodeT* NewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
- NodeT* node = Node::New<NodeT>(zone(), inputs, std::forward<Args>(args)...);
- if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
- return node;
+ NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
+ return AddNode(CreateNewNode<NodeT>(inputs, std::forward<Args>(args)...));
}
template <typename NodeT, typename... Args>
- NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
- return AddNode(NewNode<NodeT>(inputs, std::forward<Args>(args)...));
+ NodeT* CreateNewNode(Args&&... args) {
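+    // Nodes that can deopt are allocated together with a checkpoint of the
+    // interpreter state: eager deopts capture the state at the start of
+    // this bytecode (in-liveness), lazy deopts the state after it
+    // (out-liveness).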
+ if constexpr (NodeT::kProperties.can_eager_deopt()) {
+ return NodeBase::New<NodeT>(zone(), *compilation_unit_,
+ GetLatestCheckpointedState(),
+ std::forward<Args>(args)...);
+ } else if constexpr (NodeT::kProperties.can_lazy_deopt()) {
+ return NodeBase::New<NodeT>(zone(), *compilation_unit_,
+ GetCheckpointedStateForLazyDeopt(),
+ std::forward<Args>(args)...);
+ } else {
+ return NodeBase::New<NodeT>(zone(), std::forward<Args>(args)...);
+ }
}
ValueNode* GetContext() const {
@@ -170,48 +243,130 @@ class MaglevGraphBuilder {
template <class T, typename = std::enable_if_t<
std::is_convertible<T*, Object*>::value>>
typename compiler::ref_traits<T>::ref_type GetRefOperand(int operand_index) {
- return MakeRef(broker(),
- Handle<T>::cast(iterator_.GetConstantForIndexOperand(
- operand_index, isolate())));
+ // The BytecodeArray itself was fetched by using a barrier so all reads
+ // from the constant pool are safe.
+ return MakeRefAssumeMemoryFence(
+ broker(), broker()->CanonicalPersistentHandle(
+ Handle<T>::cast(iterator_.GetConstantForIndexOperand(
+ operand_index, local_isolate()))));
+ }
+
+ ValueNode* GetConstant(const compiler::ObjectRef& ref) {
+ if (ref.IsSmi()) {
+ return AddNewNode<SmiConstant>({}, Smi::FromInt(ref.AsSmi()));
+ }
+ // TODO(leszeks): Detect roots and use RootConstant.
+ return AddNewNode<Constant>({}, ref.AsHeapObject());
+ }
+
+ // Move an existing ValueNode between two registers. You can pass
+ // virtual_accumulator as the src or dst to move in or out of the accumulator.
+ void MoveNodeBetweenRegisters(interpreter::Register src,
+ interpreter::Register dst) {
+ // We shouldn't be moving newly created nodes between registers.
+ DCHECK_EQ(0, new_nodes_.count(current_interpreter_frame_.get(src)));
+ DCHECK_NOT_NULL(current_interpreter_frame_.get(src));
+
+ current_interpreter_frame_.set(dst, current_interpreter_frame_.get(src));
+ }
+
+ ValueNode* GetTaggedValue(interpreter::Register reg) {
+ // TODO(victorgomes): Add the representation (Tagged/Untagged) in the
+    // InterpreterFrameState, so that we don't need to dereference a node.
+ ValueNode* value = current_interpreter_frame_.get(reg);
+ if (!value->is_untagged_value()) return value;
+ if (value->Is<CheckedSmiUntag>()) {
+ return value->input(0).node();
+ }
+ DCHECK(value->Is<Int32AddWithOverflow>() || value->Is<Int32Constant>());
+ ValueNode* tagged = AddNewNode<CheckedSmiTag>({value});
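+    // Cache the tagged node in the register so that the tag is emitted
+    // only once.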
+ current_interpreter_frame_.set(reg, tagged);
+ return tagged;
}
- void SetAccumulator(ValueNode* node) {
- current_interpreter_frame_.set_accumulator(node);
+ ValueNode* GetSmiUntaggedValue(interpreter::Register reg) {
+ // TODO(victorgomes): Add the representation (Tagged/Untagged) in the
+    // InterpreterFrameState, so that we don't need to dereference a node.
+ ValueNode* value = current_interpreter_frame_.get(reg);
+ if (value->is_untagged_value()) return value;
+ if (value->Is<CheckedSmiTag>()) return value->input(0).node();
+ // Untag any other value.
+ ValueNode* untagged = AddNewNode<CheckedSmiUntag>({value});
+ current_interpreter_frame_.set(reg, untagged);
+ return untagged;
}
- ValueNode* GetAccumulator() const {
- return current_interpreter_frame_.accumulator();
+ ValueNode* GetAccumulatorTaggedValue() {
+ return GetTaggedValue(interpreter::Register::virtual_accumulator());
}
- ValueNode* LoadRegister(int operand_index) {
+ ValueNode* GetAccumulatorSmiUntaggedValue() {
+ return GetSmiUntaggedValue(interpreter::Register::virtual_accumulator());
+ }
+
+ bool IsRegisterEqualToAccumulator(int operand_index) {
interpreter::Register source = iterator_.GetRegisterOperand(operand_index);
- return current_interpreter_frame_.get(source);
+ return current_interpreter_frame_.get(source) ==
+ current_interpreter_frame_.accumulator();
}
- void StoreRegister(interpreter::Register target, ValueNode* value,
- const compiler::BytecodeLivenessState* liveness) {
- if (target.index() >= 0 && !liveness->RegisterIsLive(target.index())) {
- return;
- }
+ ValueNode* LoadRegisterTaggedValue(int operand_index) {
+ return GetTaggedValue(iterator_.GetRegisterOperand(operand_index));
+ }
+
+ ValueNode* LoadRegisterSmiUntaggedValue(int operand_index) {
+ return GetSmiUntaggedValue(iterator_.GetRegisterOperand(operand_index));
+ }
+
+ template <typename NodeT>
+ void SetAccumulator(NodeT* node) {
+ // Accumulator stores are equivalent to stores to the virtual accumulator
+ // register.
+ StoreRegister(interpreter::Register::virtual_accumulator(), node);
+ }
+
+ template <typename NodeT>
+ void StoreRegister(interpreter::Register target, NodeT* value) {
+ // We should only set register values to nodes that were newly created in
+ // this Visit. Existing nodes should be moved between registers with
+ // MoveNodeBetweenRegisters.
+ DCHECK_NE(0, new_nodes_.count(value));
+ MarkAsLazyDeoptResult(value, target);
current_interpreter_frame_.set(target, value);
- AddNewNode<StoreToFrame>({}, value, target);
}
- void AddCheckpoint() {
- // TODO(v8:7700): Verify this calls the initializer list overload.
- AddNewNode<Checkpoint>({}, iterator_.current_offset(),
- GetInLiveness()->AccumulatorIsLive(),
- GetAccumulator());
- has_valid_checkpoint_ = true;
+ CheckpointedInterpreterState GetLatestCheckpointedState() {
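+    // The checkpoint is created lazily and reused until a possible side
+    // effect invalidates it (see MarkPossibleSideEffect).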
+ if (!latest_checkpointed_state_) {
+ latest_checkpointed_state_.emplace(
+ BytecodeOffset(iterator_.current_offset()),
+ zone()->New<CompactInterpreterFrameState>(
+ *compilation_unit_, GetInLiveness(), current_interpreter_frame_));
+ }
+ return *latest_checkpointed_state_;
}
- void EnsureCheckpoint() {
- if (!has_valid_checkpoint_) AddCheckpoint();
+ CheckpointedInterpreterState GetCheckpointedStateForLazyDeopt() {
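+    // Lazy deopt states use the out-liveness of the current bytecode and
+    // are not cached; they are built fresh for each node.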
+ return CheckpointedInterpreterState(
+ BytecodeOffset(iterator_.current_offset()),
+ zone()->New<CompactInterpreterFrameState>(
+ *compilation_unit_, GetOutLiveness(), current_interpreter_frame_));
+ }
+
+ template <typename NodeT>
+ void MarkAsLazyDeoptResult(NodeT* value,
+ interpreter::Register result_location) {
+ DCHECK_EQ(NodeT::kProperties.can_lazy_deopt(),
+ value->properties().can_lazy_deopt());
+ if constexpr (NodeT::kProperties.can_lazy_deopt()) {
+ DCHECK(result_location.is_valid());
+ DCHECK(!value->lazy_deopt_info()->result_location.is_valid());
+ value->lazy_deopt_info()->result_location = result_location;
+ }
}
void MarkPossibleSideEffect() {
// If there was a potential side effect, invalidate the previous checkpoint.
- has_valid_checkpoint_ = false;
+ latest_checkpointed_state_.reset();
}
int next_offset() const {
@@ -233,8 +388,8 @@ class MaglevGraphBuilder {
template <typename ControlNodeT, typename... Args>
BasicBlock* CreateBlock(std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
- current_block_->set_control_node(NodeBase::New<ControlNodeT>(
- zone(), control_inputs, std::forward<Args>(args)...));
+ current_block_->set_control_node(CreateNewNode<ControlNodeT>(
+ control_inputs, std::forward<Args>(args)...));
BasicBlock* block = current_block_;
current_block_ = nullptr;
@@ -246,51 +401,65 @@ class MaglevGraphBuilder {
return block;
}
+  // Update all jumps which were targeting the not-yet-created block at the
+ // given `block_offset`, to now point to the given `block`.
+ void ResolveJumpsToBlockAtOffset(BasicBlock* block, int block_offset) const {
+ BasicBlockRef* jump_target_refs_head =
+ jump_targets_[block_offset].SetToBlockAndReturnNext(block);
+ while (jump_target_refs_head != nullptr) {
+ jump_target_refs_head =
+ jump_target_refs_head->SetToBlockAndReturnNext(block);
+ }
+ DCHECK_EQ(jump_targets_[block_offset].block_ptr(), block);
+ }
+
template <typename ControlNodeT, typename... Args>
BasicBlock* FinishBlock(int next_block_offset,
std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
BasicBlock* block =
CreateBlock<ControlNodeT>(control_inputs, std::forward<Args>(args)...);
-
- // Resolve pointers to this basic block.
- BasicBlockRef* jump_target_refs_head =
- jump_targets_[block_offset_].SetToBlockAndReturnNext(block);
- while (jump_target_refs_head != nullptr) {
- jump_target_refs_head =
- jump_target_refs_head->SetToBlockAndReturnNext(block);
- }
- DCHECK_EQ(jump_targets_[block_offset_].block_ptr(), block);
+ ResolveJumpsToBlockAtOffset(block, block_offset_);
// If the next block has merge states, then it's not a simple fallthrough,
// and we should reset the checkpoint validity.
if (merge_states_[next_block_offset] != nullptr) {
- has_valid_checkpoint_ = false;
+ latest_checkpointed_state_.reset();
}
// Start a new block for the fallthrough path, unless it's a merge point, in
   // which case we merge our state into it. That merge point could also be a
// loop header, in which case the merge state might not exist yet (if the
// only predecessors are this path and the JumpLoop).
+ DCHECK_NULL(current_block_);
if (std::is_base_of<ConditionalControlNode, ControlNodeT>::value) {
if (NumPredecessors(next_block_offset) == 1) {
StartNewBlock(next_block_offset);
} else {
- DCHECK_NULL(current_block_);
MergeIntoFrameState(block, next_block_offset);
}
}
return block;
}
+ void BuildCallFromRegisterList(ConvertReceiverMode receiver_mode);
+ void BuildCallFromRegisters(int argc_count,
+ ConvertReceiverMode receiver_mode);
+
+ void BuildPropertyCellAccess(const compiler::PropertyCellRef& property_cell);
+
template <Operation kOperation>
void BuildGenericUnaryOperationNode();
template <Operation kOperation>
void BuildGenericBinaryOperationNode();
+ template <Operation kOperation>
+ void BuildGenericBinarySmiOperationNode();
template <Operation kOperation>
void VisitUnaryOperation();
template <Operation kOperation>
void VisitBinaryOperation();
+ template <Operation kOperation>
+ void VisitBinarySmiOperation();
void MergeIntoFrameState(BasicBlock* block, int target);
void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
@@ -332,10 +501,14 @@ class MaglevGraphBuilder {
const compiler::FeedbackVectorRef& feedback() const {
return compilation_unit_->feedback();
}
- const FeedbackNexus feedback_nexus(int slot_operand_index) const {
- // TODO(leszeks): Use JSHeapBroker here.
+ const FeedbackNexus FeedbackNexusForOperand(int slot_operand_index) const {
return FeedbackNexus(feedback().object(),
- GetSlotOperand(slot_operand_index));
+ GetSlotOperand(slot_operand_index),
+ broker()->feedback_nexus_config());
+ }
+ const FeedbackNexus FeedbackNexusForSlot(FeedbackSlot slot) const {
+ return FeedbackNexus(feedback().object(), slot,
+ broker()->feedback_nexus_config());
}
const compiler::BytecodeArrayRef& bytecode() const {
return compilation_unit_->bytecode();
@@ -343,7 +516,7 @@ class MaglevGraphBuilder {
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis();
}
- Isolate* isolate() const { return compilation_unit_->isolate(); }
+ LocalIsolate* local_isolate() const { return local_isolate_; }
Zone* zone() const { return compilation_unit_->zone(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
int register_count() const { return compilation_unit_->register_count(); }
@@ -354,6 +527,7 @@ class MaglevGraphBuilder {
return compilation_unit_->graph_labeller();
}
+ LocalIsolate* const local_isolate_;
MaglevCompilationUnit* const compilation_unit_;
interpreter::BytecodeArrayIterator iterator_;
uint32_t* predecessors_;
@@ -361,7 +535,7 @@ class MaglevGraphBuilder {
// Current block information.
BasicBlock* current_block_ = nullptr;
int block_offset_ = 0;
- bool has_valid_checkpoint_ = false;
+ base::Optional<CheckpointedInterpreterState> latest_checkpointed_state_;
BasicBlockRef* jump_targets_;
MergePointInterpreterFrameState** merge_states_;
@@ -374,6 +548,10 @@ class MaglevGraphBuilder {
// TODO(v8:7700): Clean up after all bytecodes are supported.
bool found_unsupported_bytecode_ = false;
bool this_field_will_be_unused_once_all_bytecodes_are_supported_;
+
+#ifdef DEBUG
+ std::unordered_set<Node*> new_nodes_;
+#endif
};
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-graph-printer.cc b/deps/v8/src/maglev/maglev-graph-printer.cc
index ccd7bfbad8..61bdc8e417 100644
--- a/deps/v8/src/maglev/maglev-graph-printer.cc
+++ b/deps/v8/src/maglev/maglev-graph-printer.cc
@@ -211,8 +211,8 @@ int MaglevPrintingVisitorOstream::overflow(int c) {
MaglevPrintingVisitor::MaglevPrintingVisitor(std::ostream& os)
: os_(os),
- os_for_additional_info_(new MaglevPrintingVisitorOstream(os_, &targets)) {
-}
+ os_for_additional_info_(
+ new MaglevPrintingVisitorOstream(os_, &targets_)) {}
void MaglevPrintingVisitor::PreProcessGraph(
MaglevCompilationUnit* compilation_unit, Graph* graph) {
@@ -221,7 +221,7 @@ void MaglevPrintingVisitor::PreProcessGraph(
for (BasicBlock* block : *graph) {
if (block->control_node()->Is<JumpLoop>()) {
- loop_headers.insert(block->control_node()->Cast<JumpLoop>()->target());
+ loop_headers_.insert(block->control_node()->Cast<JumpLoop>()->target());
}
}
@@ -229,31 +229,31 @@ void MaglevPrintingVisitor::PreProcessGraph(
for (BlockConstIterator block_it = graph->begin(); block_it != graph->end();
++block_it) {
BasicBlock* block = *block_it;
- std::replace(targets.begin(), targets.end(), block,
+ std::replace(targets_.begin(), targets_.end(), block,
static_cast<BasicBlock*>(nullptr));
- if (loop_headers.find(block) != loop_headers.end()) {
- AddTarget(targets, block);
+ if (loop_headers_.find(block) != loop_headers_.end()) {
+ AddTarget(targets_, block);
}
ControlNode* node = block->control_node();
if (node->Is<JumpLoop>()) {
BasicBlock* target = node->Cast<JumpLoop>()->target();
- std::replace(targets.begin(), targets.end(), target,
+ std::replace(targets_.begin(), targets_.end(), target,
static_cast<BasicBlock*>(nullptr));
} else if (node->Is<UnconditionalControlNode>()) {
- AddTargetIfNotNext(targets,
+ AddTargetIfNotNext(targets_,
node->Cast<UnconditionalControlNode>()->target(),
*(block_it + 1));
} else if (node->Is<ConditionalControlNode>()) {
- AddTargetIfNotNext(targets,
+ AddTargetIfNotNext(targets_,
node->Cast<ConditionalControlNode>()->if_true(),
*(block_it + 1));
- AddTargetIfNotNext(targets,
+ AddTargetIfNotNext(targets_,
node->Cast<ConditionalControlNode>()->if_false(),
*(block_it + 1));
}
}
- DCHECK(std::all_of(targets.begin(), targets.end(),
+ DCHECK(std::all_of(targets_.begin(), targets_.end(),
[](BasicBlock* block) { return block == nullptr; }));
}
@@ -262,19 +262,19 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(
MaglevGraphLabeller* graph_labeller = compilation_unit->graph_labeller();
size_t loop_position = static_cast<size_t>(-1);
- if (loop_headers.erase(block) > 0) {
- loop_position = AddTarget(targets, block);
+ if (loop_headers_.erase(block) > 0) {
+ loop_position = AddTarget(targets_, block);
}
{
bool saw_start = false;
- for (size_t i = 0; i < targets.size(); ++i) {
+ for (size_t i = 0; i < targets_.size(); ++i) {
Connection c;
if (saw_start) {
c.AddHorizontal();
}
// If this is one of the arrows pointing to this block, terminate the
// line by connecting it rightwards.
- if (targets[i] == block) {
+ if (targets_[i] == block) {
c.Connect(kRight);
// If this is the loop header, go down instead of up and don't clear
// the target.
@@ -282,10 +282,10 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(
c.Connect(kBottom);
} else {
c.Connect(kTop);
- targets[i] = nullptr;
+ targets_[i] = nullptr;
}
saw_start = true;
- } else if (c.connected == 0 && targets[i] != nullptr) {
+ } else if (c.connected == 0 && targets_[i] != nullptr) {
// If this is another arrow, connect it, but only if that doesn't
// clobber any existing drawing.
c.AddVertical();
@@ -301,10 +301,99 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)->set_padding(1);
}
+namespace {
+
+template <typename NodeT>
+void PrintEagerDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
+ NodeT* node, const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+
+ PrintVerticalArrows(os, targets);
+ PrintPadding(os, graph_labeller, 0);
+
+ EagerDeoptInfo* deopt_info = node->eager_deopt_info();
+ os << " ↱ eager @" << deopt_info->state.bytecode_position << " : {";
+ bool first = true;
+ int index = 0;
+ deopt_info->state.register_frame->ForEachValue(
+ *state.compilation_unit(),
+ [&](ValueNode* node, interpreter::Register reg) {
+ if (first) {
+ first = false;
+ } else {
+ os << ", ";
+ }
+ os << reg.ToString() << ":" << PrintNodeLabel(graph_labeller, node)
+ << ":" << deopt_info->input_locations[index].operand();
+ index++;
+ });
+ os << "}\n";
+}
+void MaybePrintEagerDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
+ NodeBase* node, const ProcessingState& state) {
+ switch (node->opcode()) {
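+    // Dispatch on the concrete opcode so that `if constexpr` can query each
+    // node type's static deopt properties.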
+#define CASE(Name) \
+ case Opcode::k##Name: \
+ if constexpr (Name::kProperties.can_eager_deopt()) { \
+ PrintEagerDeopt<Name>(os, targets, node->Cast<Name>(), state); \
+ } \
+ break;
+ NODE_BASE_LIST(CASE)
+#undef CASE
+ }
+}
+
+template <typename NodeT>
+void PrintLazyDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
+ NodeT* node, const ProcessingState& state) {
+ MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+
+ PrintVerticalArrows(os, targets);
+ PrintPadding(os, graph_labeller, 0);
+
+ LazyDeoptInfo* deopt_info = node->lazy_deopt_info();
+ os << " ↳ lazy @" << deopt_info->state.bytecode_position << " : {";
+ bool first = true;
+ int index = 0;
+ deopt_info->state.register_frame->ForEachValue(
+ *state.compilation_unit(),
+ [&](ValueNode* node, interpreter::Register reg) {
+ if (first) {
+ first = false;
+ } else {
+ os << ", ";
+ }
+ os << reg.ToString() << ":";
+ if (reg == deopt_info->result_location) {
+ os << "<result>";
+ } else {
+ os << PrintNodeLabel(graph_labeller, node) << ":"
+ << deopt_info->input_locations[index].operand();
+ }
+ index++;
+ });
+ os << "}\n";
+}
+void MaybePrintLazyDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
+ NodeBase* node, const ProcessingState& state) {
+ switch (node->opcode()) {
+#define CASE(Name) \
+ case Opcode::k##Name: \
+ if constexpr (Name::kProperties.can_lazy_deopt()) { \
+ PrintLazyDeopt<Name>(os, targets, node->Cast<Name>(), state); \
+ } \
+ break;
+ NODE_BASE_LIST(CASE)
+#undef CASE
+ }
+}
+
+} // namespace
+
void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
MaglevGraphLabeller* graph_labeller = state.graph_labeller();
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
PrintPaddedId(os_, graph_labeller, phi);
os_ << "Phi (";
// Manually walk Phi inputs to print just the node labels, without
@@ -312,7 +401,11 @@ void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
// moves).
for (int i = 0; i < phi->input_count(); ++i) {
if (i > 0) os_ << ", ";
- os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
+ if (state.block()->predecessor_at(i) == nullptr) {
+ os_ << "<dead>";
+ } else {
+ os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
+ }
}
os_ << ") → " << phi->result().operand() << "\n";
@@ -322,27 +415,34 @@ void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
void MaglevPrintingVisitor::Process(Node* node, const ProcessingState& state) {
MaglevGraphLabeller* graph_labeller = state.graph_labeller();
- PrintVerticalArrows(os_, targets);
+
+ MaybePrintEagerDeopt(os_, targets_, node, state);
+
+ PrintVerticalArrows(os_, targets_);
PrintPaddedId(os_, graph_labeller, node);
os_ << PrintNode(graph_labeller, node) << "\n";
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
->set_padding(graph_labeller->max_node_id_width() + 4);
+
+ MaybePrintLazyDeopt(os_, targets_, node, state);
}
void MaglevPrintingVisitor::Process(ControlNode* control_node,
const ProcessingState& state) {
MaglevGraphLabeller* graph_labeller = state.graph_labeller();
+ MaybePrintEagerDeopt(os_, targets_, control_node, state);
+
bool has_fallthrough = false;
if (control_node->Is<JumpLoop>()) {
BasicBlock* target = control_node->Cast<JumpLoop>()->target();
- PrintVerticalArrows(os_, targets, {}, {target}, true);
+ PrintVerticalArrows(os_, targets_, {}, {target}, true);
os_ << "◄─";
PrintPaddedId(os_, graph_labeller, control_node, "─", -2);
- std::replace(targets.begin(), targets.end(), target,
+ std::replace(targets_.begin(), targets_.end(), target,
static_cast<BasicBlock*>(nullptr));
} else if (control_node->Is<UnconditionalControlNode>()) {
@@ -350,9 +450,9 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
control_node->Cast<UnconditionalControlNode>()->target();
std::set<size_t> arrows_starting_here;
- has_fallthrough |= !AddTargetIfNotNext(targets, target, state.next_block(),
+ has_fallthrough |= !AddTargetIfNotNext(targets_, target, state.next_block(),
&arrows_starting_here);
- PrintVerticalArrows(os_, targets, arrows_starting_here);
+ PrintVerticalArrows(os_, targets_, arrows_starting_here);
PrintPaddedId(os_, graph_labeller, control_node,
has_fallthrough ? " " : "─");
@@ -364,14 +464,14 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
std::set<size_t> arrows_starting_here;
has_fallthrough |= !AddTargetIfNotNext(
- targets, false_target, state.next_block(), &arrows_starting_here);
+ targets_, false_target, state.next_block(), &arrows_starting_here);
has_fallthrough |= !AddTargetIfNotNext(
- targets, true_target, state.next_block(), &arrows_starting_here);
- PrintVerticalArrows(os_, targets, arrows_starting_here);
+ targets_, true_target, state.next_block(), &arrows_starting_here);
+ PrintVerticalArrows(os_, targets_, arrows_starting_here);
PrintPaddedId(os_, graph_labeller, control_node, "─");
} else {
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
PrintPaddedId(os_, graph_labeller, control_node);
}
@@ -383,13 +483,13 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
control_node->Cast<UnconditionalControlNode>()->target();
if (target->has_phi()) {
printed_phis = true;
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
PrintPadding(os_, graph_labeller, -1);
os_ << (has_fallthrough ? "│" : " ");
os_ << " with gap moves:\n";
int pid = state.block()->predecessor_id();
for (Phi* phi : *target->phis()) {
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
PrintPadding(os_, graph_labeller, -1);
os_ << (has_fallthrough ? "│" : " ");
os_ << " - ";
@@ -400,7 +500,7 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
}
}
- PrintVerticalArrows(os_, targets);
+ PrintVerticalArrows(os_, targets_);
if (has_fallthrough) {
PrintPadding(os_, graph_labeller, -1);
if (printed_phis) {
diff --git a/deps/v8/src/maglev/maglev-graph-printer.h b/deps/v8/src/maglev/maglev-graph-printer.h
index d416293d08..6250727460 100644
--- a/deps/v8/src/maglev/maglev-graph-printer.h
+++ b/deps/v8/src/maglev/maglev-graph-printer.h
@@ -26,9 +26,6 @@ class ProcessingState;
class MaglevPrintingVisitor {
public:
- // Could be interesting to print checkpoints too.
- static constexpr bool kNeedsCheckpointStates = false;
-
explicit MaglevPrintingVisitor(std::ostream& os);
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
@@ -43,8 +40,8 @@ class MaglevPrintingVisitor {
private:
std::ostream& os_;
std::unique_ptr<std::ostream> os_for_additional_info_;
- std::set<BasicBlock*> loop_headers;
- std::vector<BasicBlock*> targets;
+ std::set<BasicBlock*> loop_headers_;
+ std::vector<BasicBlock*> targets_;
};
void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
diff --git a/deps/v8/src/maglev/maglev-graph-processor.h b/deps/v8/src/maglev/maglev-graph-processor.h
index 892fe6071b..557d969b47 100644
--- a/deps/v8/src/maglev/maglev-graph-processor.h
+++ b/deps/v8/src/maglev/maglev-graph-processor.h
@@ -24,10 +24,6 @@ namespace maglev {
//
// It expects a NodeProcessor class with:
//
-// // True if the GraphProcessor should snapshot Checkpoint states for
-// // deopting nodes.
-// static constexpr bool kNeedsCheckpointStates;
-//
// // A function that processes the graph before the nodes are walked.
// void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
//
@@ -50,15 +46,8 @@ class GraphProcessor;
class ProcessingState {
public:
explicit ProcessingState(MaglevCompilationUnit* compilation_unit,
- BlockConstIterator block_it,
- const InterpreterFrameState* interpreter_frame_state,
- const Checkpoint* checkpoint,
- const InterpreterFrameState* checkpoint_frame_state)
- : compilation_unit_(compilation_unit),
- block_it_(block_it),
- interpreter_frame_state_(interpreter_frame_state),
- checkpoint_(checkpoint),
- checkpoint_frame_state_(checkpoint_frame_state) {}
+ BlockConstIterator block_it)
+ : compilation_unit_(compilation_unit), block_it_(block_it) {}
// Disallow copies, since the underlying frame states stay mutable.
ProcessingState(const ProcessingState&) = delete;
@@ -67,20 +56,7 @@ class ProcessingState {
BasicBlock* block() const { return *block_it_; }
BasicBlock* next_block() const { return *(block_it_ + 1); }
- const InterpreterFrameState* interpreter_frame_state() const {
- DCHECK_NOT_NULL(interpreter_frame_state_);
- return interpreter_frame_state_;
- }
-
- const Checkpoint* checkpoint() const {
- DCHECK_NOT_NULL(checkpoint_);
- return checkpoint_;
- }
-
- const InterpreterFrameState* checkpoint_frame_state() const {
- DCHECK_NOT_NULL(checkpoint_frame_state_);
- return checkpoint_frame_state_;
- }
+ MaglevCompilationUnit* compilation_unit() const { return compilation_unit_; }
int register_count() const { return compilation_unit_->register_count(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
@@ -92,27 +68,16 @@ class ProcessingState {
private:
MaglevCompilationUnit* compilation_unit_;
BlockConstIterator block_it_;
- const InterpreterFrameState* interpreter_frame_state_;
- const Checkpoint* checkpoint_;
- const InterpreterFrameState* checkpoint_frame_state_;
};
template <typename NodeProcessor>
class GraphProcessor {
public:
- static constexpr bool kNeedsCheckpointStates =
- NodeProcessor::kNeedsCheckpointStates;
-
template <typename... Args>
explicit GraphProcessor(MaglevCompilationUnit* compilation_unit,
Args&&... args)
: compilation_unit_(compilation_unit),
- node_processor_(std::forward<Args>(args)...),
- current_frame_state_(*compilation_unit_) {
- if (kNeedsCheckpointStates) {
- checkpoint_state_.emplace(*compilation_unit_);
- }
- }
+ node_processor_(std::forward<Args>(args)...) {}
void ProcessGraph(Graph* graph) {
graph_ = graph;
@@ -124,14 +89,6 @@ class GraphProcessor {
node_processor_.PreProcessBasicBlock(compilation_unit_, block);
- if (block->has_state()) {
- current_frame_state_.CopyFrom(*compilation_unit_, *block->state());
- if (kNeedsCheckpointStates) {
- checkpoint_state_->last_checkpoint_block_it = block_it_;
- checkpoint_state_->last_checkpoint_node_it = NodeConstIterator();
- }
- }
-
if (block->has_phi()) {
for (Phi* phi : *block->phis()) {
node_processor_.Process(phi, GetCurrentState());
@@ -155,11 +112,7 @@ class GraphProcessor {
private:
ProcessingState GetCurrentState() {
- return ProcessingState(
- compilation_unit_, block_it_, &current_frame_state_,
- kNeedsCheckpointStates ? checkpoint_state_->latest_checkpoint : nullptr,
- kNeedsCheckpointStates ? &checkpoint_state_->checkpoint_frame_state
- : nullptr);
+ return ProcessingState(compilation_unit_, block_it_);
}
void ProcessNodeBase(NodeBase* node, const ProcessingState& state) {
@@ -176,170 +129,6 @@ class GraphProcessor {
void PreProcess(NodeBase* node, const ProcessingState& state) {}
- void PreProcess(Checkpoint* checkpoint, const ProcessingState& state) {
- current_frame_state_.set_accumulator(checkpoint->accumulator());
- if (kNeedsCheckpointStates) {
- checkpoint_state_->latest_checkpoint = checkpoint;
- if (checkpoint->is_used()) {
- checkpoint_state_->checkpoint_frame_state.CopyFrom(
- *compilation_unit_, current_frame_state_);
- checkpoint_state_->last_checkpoint_block_it = block_it_;
- checkpoint_state_->last_checkpoint_node_it = node_it_;
- ClearDeadCheckpointNodes();
- }
- }
- }
-
- void PreProcess(StoreToFrame* store_to_frame, const ProcessingState& state) {
- current_frame_state_.set(store_to_frame->target(), store_to_frame->value());
- }
-
- void PreProcess(SoftDeopt* node, const ProcessingState& state) {
- PreProcessDeoptingNode();
- }
-
- void PreProcess(CheckMaps* node, const ProcessingState& state) {
- PreProcessDeoptingNode();
- }
-
- void PreProcessDeoptingNode() {
- if (!kNeedsCheckpointStates) return;
-
- Checkpoint* checkpoint = checkpoint_state_->latest_checkpoint;
- if (checkpoint->is_used()) {
- DCHECK(!checkpoint_state_->last_checkpoint_node_it.is_null());
- DCHECK_EQ(checkpoint, *checkpoint_state_->last_checkpoint_node_it);
- return;
- }
- DCHECK_IMPLIES(!checkpoint_state_->last_checkpoint_node_it.is_null(),
- checkpoint != *checkpoint_state_->last_checkpoint_node_it);
-
- // TODO(leszeks): The following code is _ugly_, should figure out how to
- // clean it up.
-
- // Go to the previous state checkpoint (either on the Checkpoint that
- // provided the current checkpoint snapshot, or on a BasicBlock).
- BlockConstIterator block_it = checkpoint_state_->last_checkpoint_block_it;
- NodeConstIterator node_it = checkpoint_state_->last_checkpoint_node_it;
- if (node_it.is_null()) {
- // There was no recent enough Checkpoint node, and the block iterator
- // points at a basic block with a state snapshot. Copy that snapshot and
- // start iterating from there.
- BasicBlock* block = *block_it;
- DCHECK(block->has_state());
- checkpoint_state_->checkpoint_frame_state.CopyFrom(*compilation_unit_,
- *block->state());
-
- // Start iterating from the first node in the block.
- node_it = block->nodes().begin();
- } else {
- // The node iterator should point at the previous Checkpoint node. We
- // don't need that Checkpoint state snapshot anymore, we're making a new
- // one, so we can just reuse the snapshot as-is without copying it.
- DCHECK_NE(*node_it, checkpoint);
- DCHECK((*node_it)->Is<Checkpoint>());
- DCHECK((*node_it)->Cast<Checkpoint>()->is_used());
-
- // Advance it by one since we don't need to check this node anymore.
- ++node_it;
- }
-
- // Now walk forward to the checkpoint, and apply any StoreToFrame operations
- // along the way into the snapshotted checkpoint state.
- BasicBlock* block = *block_it;
- while (true) {
- // Check if we've run out of nodes in this block, and advance to the
- // next block if so.
- while (node_it == block->nodes().end()) {
- DCHECK_NE(block_it, graph_->end());
-
- // We should only end up visiting blocks with fallthrough to the next
- // block -- otherwise, the block should have had a frame state snapshot,
- // as either a merge block or a non-fallthrough jump target.
- if ((*block_it)->control_node()->Is<Jump>()) {
- DCHECK_EQ((*block_it)->control_node()->Cast<Jump>()->target(),
- *(block_it + 1));
- } else {
- DCHECK_IMPLIES((*block_it)
- ->control_node()
- ->Cast<ConditionalControlNode>()
- ->if_true() != *(block_it + 1),
- (*block_it)
- ->control_node()
- ->Cast<ConditionalControlNode>()
- ->if_false() != *(block_it + 1));
- }
-
- // Advance to the next block (which the above DCHECKs confirm is the
- // unconditional fallthrough from the previous block), and update the
- // cached block pointer.
- block_it++;
- block = *block_it;
-
- // We should never visit a block with state (aside from the very first
- // block we visit), since then that should have been our start point
- // to start with.
- DCHECK(!(*block_it)->has_state());
- node_it = (*block_it)->nodes().begin();
- }
-
- // We should never reach the current node, the "until" checkpoint node
- // should be before it.
- DCHECK_NE(node_it, node_it_);
-
- Node* node = *node_it;
-
- // Break once we hit the given Checkpoint node. This could be right at
- // the start of the iteration, if the BasicBlock held the snapshot and the
- // Checkpoint was the first node in it.
- if (node == checkpoint) break;
-
- // Update the state from the current node, if it's a state update.
- if (node->Is<StoreToFrame>()) {
- StoreToFrame* store_to_frame = node->Cast<StoreToFrame>();
- checkpoint_state_->checkpoint_frame_state.set(store_to_frame->target(),
- store_to_frame->value());
- } else {
- // Any checkpoints we meet along the way should be unused, otherwise
- // they should have provided the most recent state snapshot.
- DCHECK_IMPLIES(node->Is<Checkpoint>(),
- !node->Cast<Checkpoint>()->is_used());
- }
-
- // Continue to the next node.
- ++node_it;
- }
-
- checkpoint_state_->last_checkpoint_block_it = block_it;
- checkpoint_state_->last_checkpoint_node_it = node_it;
- checkpoint_state_->checkpoint_frame_state.set_accumulator(
- checkpoint->accumulator());
- ClearDeadCheckpointNodes();
- checkpoint->SetUsed();
- }
-
- // Walk the checkpointed state, and null out any values that are dead at this
- // checkpoint.
- // TODO(leszeks): Consider doing this on checkpoint copy, not as a
- // post-process step.
- void ClearDeadCheckpointNodes() {
- const compiler::BytecodeLivenessState* liveness =
- bytecode_analysis().GetInLivenessFor(
- checkpoint_state_->latest_checkpoint->bytecode_position());
- for (int i = 0; i < register_count(); ++i) {
- if (!liveness->RegisterIsLive(i)) {
- checkpoint_state_->checkpoint_frame_state.set(interpreter::Register(i),
- nullptr);
- }
- }
-
- // The accumulator is on the checkpoint node itself, and should have already
- // been nulled out during graph building if it's dead.
- DCHECK_EQ(
- !liveness->AccumulatorIsLive(),
- checkpoint_state_->checkpoint_frame_state.accumulator() == nullptr);
- }
-
int register_count() const { return compilation_unit_->register_count(); }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis();
@@ -350,19 +139,6 @@ class GraphProcessor {
Graph* graph_;
BlockConstIterator block_it_;
NodeConstIterator node_it_;
- InterpreterFrameState current_frame_state_;
-
- // The CheckpointState field only exists if the node processor needs
- // checkpoint states.
- struct CheckpointState {
- explicit CheckpointState(const MaglevCompilationUnit& compilation_unit)
- : checkpoint_frame_state(compilation_unit) {}
- Checkpoint* latest_checkpoint = nullptr;
- BlockConstIterator last_checkpoint_block_it;
- NodeConstIterator last_checkpoint_node_it;
- InterpreterFrameState checkpoint_frame_state;
- };
- base::Optional<CheckpointState> checkpoint_state_;
};
// A NodeProcessor that wraps multiple NodeProcessors, and forwards to each of
@@ -373,8 +149,6 @@ class NodeMultiProcessor;
template <>
class NodeMultiProcessor<> {
public:
- static constexpr bool kNeedsCheckpointStates = false;
-
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
@@ -387,9 +161,6 @@ class NodeMultiProcessor<Processor, Processors...>
using Base = NodeMultiProcessor<Processors...>;
public:
- static constexpr bool kNeedsCheckpointStates =
- Processor::kNeedsCheckpointStates || Base::kNeedsCheckpointStates;
-
template <typename Node>
void Process(Node* node, const ProcessingState& state) {
processor_.Process(node, state);
diff --git a/deps/v8/src/maglev/maglev-graph-verifier.h b/deps/v8/src/maglev/maglev-graph-verifier.h
new file mode 100644
index 0000000000..55bd4e89a5
--- /dev/null
+++ b/deps/v8/src/maglev/maglev-graph-verifier.h
@@ -0,0 +1,143 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAGLEV_MAGLEV_GRAPH_VERIFIER_H_
+#define V8_MAGLEV_MAGLEV_GRAPH_VERIFIER_H_
+
+#include "src/maglev/maglev-graph-labeller.h"
+#include "src/maglev/maglev-ir.h"
+
+namespace v8 {
+namespace internal {
+namespace maglev {
+
+std::ostream& operator<<(std::ostream& os, const ValueRepresentation& repr) {
+ switch (repr) {
+ case ValueRepresentation::kTagged:
+ os << "TaggedValue";
+ break;
+ case ValueRepresentation::kUntagged:
+ os << "UntaggedValue";
+ break;
+ }
+ return os;
+}
+
+class Graph;
+
+// TODO(victorgomes): Currently this only verifies that the inputs of all
+// ValueNodes have the expected tagged/untagged representation. Add more
+// verification later.
+class MaglevGraphVerifier {
+ public:
+ void PreProcessGraph(MaglevCompilationUnit* compilation_unit, Graph* graph) {
+ if (compilation_unit->has_graph_labeller()) {
+ graph_labeller_ = compilation_unit->graph_labeller();
+ }
+ }
+
+ void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
+ void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
+
+ void CheckValueInputIs(NodeBase* node, int i, ValueRepresentation repr) {
+ ValueNode* input = node->input(i).node();
+ if (input->value_representation() != repr) {
+ std::ostringstream str;
+ str << "Type representation error: node ";
+ if (graph_labeller_) {
+ str << "#" << graph_labeller_->NodeId(node) << " : ";
+ }
+ str << node->opcode() << " (input @" << i << " = " << input->opcode()
+ << ") type " << input->value_representation() << " is not " << repr;
+ FATAL("%s", str.str().c_str());
+ }
+ }
+
+ void Process(NodeBase* node, const ProcessingState& state) {
+ switch (node->opcode()) {
+ case Opcode::kConstant:
+ case Opcode::kSmiConstant:
+ case Opcode::kInt32Constant:
+ case Opcode::kRootConstant:
+ case Opcode::kInitialValue:
+ case Opcode::kRegisterInput:
+ case Opcode::kGapMove:
+ case Opcode::kDeopt:
+ case Opcode::kJump:
+ case Opcode::kJumpLoop:
+ // No input.
+ DCHECK_EQ(node->input_count(), 0);
+ break;
+ case Opcode::kGenericNegate:
+ case Opcode::kGenericIncrement:
+ case Opcode::kGenericDecrement:
+ case Opcode::kCheckedSmiUntag:
+ case Opcode::kLoadField:
+ case Opcode::kLoadGlobal:
+ // TODO(victorgomes): Can we check that the input is actually a map?
+ case Opcode::kCheckMaps:
+ // TODO(victorgomes): Can we check that the input is Boolean?
+ case Opcode::kBranchIfTrue:
+ case Opcode::kBranchIfToBooleanTrue:
+ case Opcode::kReturn:
+ // Generic tagged unary operations.
+ DCHECK_EQ(node->input_count(), 1);
+ CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
+ break;
+ case Opcode::kCheckedSmiTag:
+ // Untagged unary operations.
+ CheckValueInputIs(node, 0, ValueRepresentation::kUntagged);
+ break;
+ case Opcode::kGenericAdd:
+ case Opcode::kGenericSubtract:
+ case Opcode::kGenericMultiply:
+ case Opcode::kGenericDivide:
+ case Opcode::kGenericModulus:
+ case Opcode::kGenericExponentiate:
+ case Opcode::kGenericBitwiseAnd:
+ case Opcode::kGenericBitwiseOr:
+ case Opcode::kGenericBitwiseXor:
+ case Opcode::kGenericShiftLeft:
+ case Opcode::kGenericShiftRight:
+ case Opcode::kGenericShiftRightLogical:
+ case Opcode::kGenericBitwiseNot:
+ // TODO(victorgomes): Can we use the fact that these nodes return a
+ // Boolean?
+ case Opcode::kGenericEqual:
+ case Opcode::kGenericStrictEqual:
+ case Opcode::kGenericLessThan:
+ case Opcode::kGenericLessThanOrEqual:
+ case Opcode::kGenericGreaterThan:
+ case Opcode::kGenericGreaterThanOrEqual:
+ // TODO(victorgomes): Can we check that first input is an Object?
+ case Opcode::kStoreField:
+ case Opcode::kLoadNamedGeneric:
+ // Generic tagged binary operations.
+ DCHECK_EQ(node->input_count(), 2);
+ CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
+ CheckValueInputIs(node, 1, ValueRepresentation::kTagged);
+ break;
+ case Opcode::kInt32AddWithOverflow:
+ // Untagged binary operations.
+ CheckValueInputIs(node, 0, ValueRepresentation::kUntagged);
+ CheckValueInputIs(node, 1, ValueRepresentation::kUntagged);
+ break;
+ case Opcode::kCall:
+ case Opcode::kPhi:
+ // All inputs should be tagged.
+ for (int i = 0; i < node->input_count(); i++) {
+ CheckValueInputIs(node, i, ValueRepresentation::kTagged);
+ }
+ break;
+ }
+ }
+
+ private:
+ MaglevGraphLabeller* graph_labeller_ = nullptr;
+};
+
+} // namespace maglev
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAGLEV_MAGLEV_GRAPH_VERIFIER_H_
diff --git a/deps/v8/src/maglev/maglev-interpreter-frame-state.h b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
index 5a907607f9..a64b1b88c5 100644
--- a/deps/v8/src/maglev/maglev-interpreter-frame-state.h
+++ b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
@@ -10,6 +10,7 @@
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/interpreter/bytecode-register.h"
+#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
#include "src/maglev/maglev-register-frame-array.h"
@@ -29,26 +30,30 @@ class InterpreterFrameState {
InterpreterFrameState(const MaglevCompilationUnit& info,
const InterpreterFrameState& state)
- : accumulator_(state.accumulator_), frame_(info) {
+ : frame_(info) {
frame_.CopyFrom(info, state.frame_, nullptr);
}
void CopyFrom(const MaglevCompilationUnit& info,
const InterpreterFrameState& state) {
- accumulator_ = state.accumulator_;
frame_.CopyFrom(info, state.frame_, nullptr);
}
inline void CopyFrom(const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state);
- void set_accumulator(ValueNode* value) { accumulator_ = value; }
- ValueNode* accumulator() const { return accumulator_; }
+ void set_accumulator(ValueNode* value) {
+ frame_[interpreter::Register::virtual_accumulator()] = value;
+ }
+ ValueNode* accumulator() const {
+ return frame_[interpreter::Register::virtual_accumulator()];
+ }
void set(interpreter::Register reg, ValueNode* value) {
DCHECK_IMPLIES(reg.is_parameter(),
reg == interpreter::Register::current_context() ||
reg == interpreter::Register::function_closure() ||
+ reg == interpreter::Register::virtual_accumulator() ||
reg.ToParameterIndex() >= 0);
frame_[reg] = value;
}
@@ -56,6 +61,7 @@ class InterpreterFrameState {
DCHECK_IMPLIES(reg.is_parameter(),
reg == interpreter::Register::current_context() ||
reg == interpreter::Register::function_closure() ||
+ reg == interpreter::Register::virtual_accumulator() ||
reg.ToParameterIndex() >= 0);
return frame_[reg];
}
@@ -63,10 +69,120 @@ class InterpreterFrameState {
const RegisterFrameArray<ValueNode*>& frame() const { return frame_; }
private:
- ValueNode* accumulator_ = nullptr;
RegisterFrameArray<ValueNode*> frame_;
};
+class CompactInterpreterFrameState {
+ public:
+ CompactInterpreterFrameState(const MaglevCompilationUnit& info,
+ const compiler::BytecodeLivenessState* liveness)
+ : live_registers_and_accumulator_(
+ info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
+ liveness_(liveness) {}
+
+ CompactInterpreterFrameState(const MaglevCompilationUnit& info,
+ const compiler::BytecodeLivenessState* liveness,
+ const InterpreterFrameState& state)
+ : CompactInterpreterFrameState(info, liveness) {
+ ForEachValue(info, [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = state.get(reg);
+ });
+ }
+
+ CompactInterpreterFrameState(const CompactInterpreterFrameState&) = delete;
+ CompactInterpreterFrameState(CompactInterpreterFrameState&&) = delete;
+ CompactInterpreterFrameState& operator=(const CompactInterpreterFrameState&) =
+ delete;
+ CompactInterpreterFrameState& operator=(CompactInterpreterFrameState&&) =
+ delete;
+
+ template <typename Function>
+ void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) const {
+ for (int i = 0; i < info.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ f(live_registers_and_accumulator_[i], reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) {
+ for (int i = 0; i < info.parameter_count(); i++) {
+ interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+ f(live_registers_and_accumulator_[i], reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachLocal(const MaglevCompilationUnit& info, Function&& f) const {
+ int live_reg = 0;
+ for (int register_index : *liveness_) {
+ interpreter::Register reg = interpreter::Register(register_index);
+ f(live_registers_and_accumulator_[info.parameter_count() + live_reg++],
+ reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachLocal(const MaglevCompilationUnit& info, Function&& f) {
+ int live_reg = 0;
+ for (int register_index : *liveness_) {
+ interpreter::Register reg = interpreter::Register(register_index);
+ f(live_registers_and_accumulator_[info.parameter_count() + live_reg++],
+ reg);
+ }
+ }
+
+ template <typename Function>
+ void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
+ ForEachParameter(info, f);
+ ForEachLocal(info, f);
+ }
+
+ template <typename Function>
+ void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) const {
+ ForEachParameter(info, f);
+ ForEachLocal(info, f);
+ }
+
+ template <typename Function>
+ void ForEachValue(const MaglevCompilationUnit& info, Function&& f) {
+ ForEachRegister(info, f);
+ if (liveness_->AccumulatorIsLive()) {
+ f(accumulator(info), interpreter::Register::virtual_accumulator());
+ }
+ }
+
+ template <typename Function>
+ void ForEachValue(const MaglevCompilationUnit& info, Function&& f) const {
+ ForEachRegister(info, f);
+ if (liveness_->AccumulatorIsLive()) {
+ f(accumulator(info), interpreter::Register::virtual_accumulator());
+ }
+ }
+
+ const compiler::BytecodeLivenessState* liveness() const { return liveness_; }
+
+ ValueNode*& accumulator(const MaglevCompilationUnit& info) {
+ return live_registers_and_accumulator_[size(info) - 1];
+ }
+ ValueNode* accumulator(const MaglevCompilationUnit& info) const {
+ return live_registers_and_accumulator_[size(info) - 1];
+ }
+
+ size_t size(const MaglevCompilationUnit& info) const {
+ return SizeFor(info, liveness_);
+ }
+
+ private:
+ static size_t SizeFor(const MaglevCompilationUnit& info,
+ const compiler::BytecodeLivenessState* liveness) {
+ return info.parameter_count() + liveness->live_value_count();
+ }
+
+ ValueNode** const live_registers_and_accumulator_;
+ const compiler::BytecodeLivenessState* const liveness_;
+};
+
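The class above packs only live values: parameters first, then live locals in liveness order, then (if live) the accumulator in the final slot. A small compilable sketch of that layout, with made-up liveness data standing in for the real BytecodeLivenessState:

    #include <cstdio>
    #include <vector>

    int main() {
      // Assume 2 parameters, live locals {r0, r3}, and a live accumulator.
      const int parameter_count = 2;
      const std::vector<int> live_local_indices = {0, 3};
      const bool accumulator_is_live = true;

      // SizeFor(): parameters plus live values; dead locals get no slot.
      const size_t size = parameter_count + live_local_indices.size() +
                          (accumulator_is_live ? 1 : 0);
      printf("array size = %zu\n", size);

      // ForEachValue() visits slots in this fixed order:
      size_t slot = 0;
      for (int i = 0; i < parameter_count; ++i)  // ForEachParameter
        printf("slot %zu: parameter a%d\n", slot++, i);
      for (int index : live_local_indices)       // ForEachLocal (live only)
        printf("slot %zu: local r%d\n", slot++, index);
      if (accumulator_is_live)                   // accumulator() == last slot
        printf("slot %zu: accumulator\n", slot++);
      return 0;
    }
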
class MergePointRegisterState {
public:
class Iterator {
@@ -108,6 +224,8 @@ class MergePointRegisterState {
class MergePointInterpreterFrameState {
public:
+ static constexpr BasicBlock* kDeadPredecessor = nullptr;
+
void CheckIsLoopPhiIfNeeded(const MaglevCompilationUnit& compilation_unit,
int merge_offset, interpreter::Register reg,
ValueNode* value) {
@@ -132,17 +250,8 @@ class MergePointInterpreterFrameState {
const compiler::BytecodeLivenessState* liveness)
: predecessor_count_(predecessor_count),
predecessors_so_far_(1),
- live_registers_and_accumulator_(
- info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
- liveness_(liveness),
- predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
- int live_index = 0;
- ForEachRegister(info, [&](interpreter::Register reg) {
- live_registers_and_accumulator_[live_index++] = state.get(reg);
- });
- if (liveness_->AccumulatorIsLive()) {
- live_registers_and_accumulator_[live_index++] = state.accumulator();
- }
+ predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)),
+ frame_state_(info, liveness, state) {
predecessors_[0] = predecessor;
}
@@ -152,27 +261,24 @@ class MergePointInterpreterFrameState {
const compiler::LoopInfo* loop_info)
: predecessor_count_(predecessor_count),
predecessors_so_far_(1),
- live_registers_and_accumulator_(
- info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
- liveness_(liveness),
- predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
- int live_index = 0;
+ predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)),
+ frame_state_(info, liveness) {
auto& assignments = loop_info->assignments();
- ForEachParameter(info, [&](interpreter::Register reg) {
- ValueNode* value = nullptr;
- if (assignments.ContainsParameter(reg.ToParameterIndex())) {
- value = NewLoopPhi(info.zone(), reg, merge_offset, value);
- }
- live_registers_and_accumulator_[live_index++] = value;
- });
- ForEachLocal([&](interpreter::Register reg) {
- ValueNode* value = nullptr;
- if (assignments.ContainsLocal(reg.index())) {
- value = NewLoopPhi(info.zone(), reg, merge_offset, value);
- }
- live_registers_and_accumulator_[live_index++] = value;
- });
- DCHECK(!liveness_->AccumulatorIsLive());
+ frame_state_.ForEachParameter(
+ info, [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = nullptr;
+ if (assignments.ContainsParameter(reg.ToParameterIndex())) {
+ entry = NewLoopPhi(info.zone(), reg, merge_offset);
+ }
+ });
+ frame_state_.ForEachLocal(
+ info, [&](ValueNode*& entry, interpreter::Register reg) {
+ entry = nullptr;
+ if (assignments.ContainsLocal(reg.index())) {
+ entry = NewLoopPhi(info.zone(), reg, merge_offset);
+ }
+ });
+ DCHECK(!frame_state_.liveness()->AccumulatorIsLive());
#ifdef DEBUG
predecessors_[0] = nullptr;
@@ -181,26 +287,24 @@ class MergePointInterpreterFrameState {
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
- void Merge(const MaglevCompilationUnit& compilation_unit,
+ void Merge(MaglevCompilationUnit& compilation_unit,
const InterpreterFrameState& unmerged, BasicBlock* predecessor,
int merge_offset) {
DCHECK_GT(predecessor_count_, 1);
DCHECK_LT(predecessors_so_far_, predecessor_count_);
predecessors_[predecessors_so_far_] = predecessor;
- ForEachValue(
- compilation_unit, [&](interpreter::Register reg, ValueNode*& value) {
+ frame_state_.ForEachValue(
+ compilation_unit, [&](ValueNode*& value, interpreter::Register reg) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
- value = MergeValue(compilation_unit.zone(), reg, value,
- unmerged.get(reg), merge_offset);
+ value = MergeValue(compilation_unit, reg, value, unmerged.get(reg),
+ merge_offset);
});
predecessors_so_far_++;
DCHECK_LE(predecessors_so_far_, predecessor_count_);
}
- MergePointRegisterState& register_state() { return register_state_; }
-
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
void MergeLoop(const MaglevCompilationUnit& compilation_unit,
@@ -210,16 +314,38 @@ class MergePointInterpreterFrameState {
DCHECK_NULL(predecessors_[0]);
predecessors_[0] = loop_end_block;
- ForEachValue(
- compilation_unit, [&](interpreter::Register reg, ValueNode* value) {
+ frame_state_.ForEachValue(
+ compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
MergeLoopValue(compilation_unit.zone(), reg, value,
loop_end_state.get(reg), merge_offset);
});
- DCHECK(!liveness_->AccumulatorIsLive());
}
+ // Merges a dead framestate (e.g. one which has been early terminated with a
+ // deopt).
+ void MergeDead() {
+ DCHECK_GT(predecessor_count_, 1);
+ DCHECK_LT(predecessors_so_far_, predecessor_count_);
+ predecessors_[predecessors_so_far_] = kDeadPredecessor;
+ predecessors_so_far_++;
+ DCHECK_LE(predecessors_so_far_, predecessor_count_);
+ }
+
+ // Merges a dead loop framestate (e.g. one where the block containing the
+ // JumpLoop has been early terminated with a deopt).
+ void MergeDeadLoop() {
+ DCHECK_EQ(predecessors_so_far_, predecessor_count_);
+ DCHECK_NULL(predecessors_[0]);
+ predecessors_[0] = kDeadPredecessor;
+ }
+
+ const CompactInterpreterFrameState& frame_state() const {
+ return frame_state_;
+ }
+ MergePointRegisterState& register_state() { return register_state_; }
+
bool has_phi() const { return !phis_.is_empty(); }
Phi::List* phis() { return &phis_; }
@@ -242,9 +368,40 @@ class MergePointInterpreterFrameState {
const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state);
- ValueNode* MergeValue(Zone* zone, interpreter::Register owner,
- ValueNode* merged, ValueNode* unmerged,
- int merge_offset) {
+ ValueNode* TagValue(MaglevCompilationUnit& compilation_unit,
+ ValueNode* value) {
+ DCHECK(value->is_untagged_value());
+ if (value->Is<CheckedSmiUntag>()) {
+ return value->input(0).node();
+ }
+ DCHECK(value->Is<Int32AddWithOverflow>() || value->Is<Int32Constant>());
+ // If the next Node in the block after value is its CheckedSmiTag version,
+ // reuse it.
+ if (value->NextNode()) {
+ CheckedSmiTag* tagged = value->NextNode()->TryCast<CheckedSmiTag>();
+ if (tagged != nullptr && value == tagged->input().node()) {
+ return tagged;
+ }
+ }
+ // Otherwise create a tagged version.
+ ValueNode* tagged =
+ Node::New<CheckedSmiTag, std::initializer_list<ValueNode*>>(
+ compilation_unit.zone(), compilation_unit,
+ value->eager_deopt_info()->state, {value});
+ value->AddNodeAfter(tagged);
+ compilation_unit.RegisterNodeInGraphLabeller(tagged);
+ return tagged;
+ }
+
+ ValueNode* EnsureTagged(MaglevCompilationUnit& compilation_unit,
+ ValueNode* value) {
+ if (value->is_untagged_value()) return TagValue(compilation_unit, value);
+ return value;
+ }
+
+ ValueNode* MergeValue(MaglevCompilationUnit& compilation_unit,
+ interpreter::Register owner, ValueNode* merged,
+ ValueNode* unmerged, int merge_offset) {
// If the merged node is null, this is a pre-created loop header merge
// frame with null values for anything that isn't a loop Phi.
if (merged == nullptr) {
@@ -258,12 +415,22 @@ class MergePointInterpreterFrameState {
// It's possible that merged == unmerged at this point since loop-phis are
// not dropped if they are only assigned to themselves in the loop.
DCHECK_EQ(result->owner(), owner);
+ unmerged = EnsureTagged(compilation_unit, unmerged);
result->set_input(predecessors_so_far_, unmerged);
return result;
}
if (merged == unmerged) return merged;
+ // We guarantee that the values are tagged.
+ // TODO(victorgomes): Support Phi nodes of untagged values.
+ merged = EnsureTagged(compilation_unit, merged);
+ unmerged = EnsureTagged(compilation_unit, unmerged);
+
+ // Tagged versions could point to the same value; avoid Phi nodes in this
+ // case.
+ if (merged == unmerged) return merged;
+
// Up to this point all predecessors had the same value for this interpreter
// frame slot. Now that we find a distinct value, insert a copy of the first
// value for each predecessor seen so far, in addition to the new value.
@@ -272,7 +439,8 @@ class MergePointInterpreterFrameState {
// the frame slot. In that case we only need the inputs for representation
// selection, and hence could remove duplicate inputs. We'd likely need to
// attach the interpreter register to the phi in that case?
- result = Node::New<Phi>(zone, predecessor_count_, owner, merge_offset);
+ result = Node::New<Phi>(compilation_unit.zone(), predecessor_count_, owner,
+ merge_offset);
for (int i = 0; i < predecessors_so_far_; i++) result->set_input(i, merged);
result->set_input(predecessors_so_far_, unmerged);
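
The merge rule in MergeValue, restated as a standalone sketch (plain ints stand in for ValueNode*; not V8 code): as long as every predecessor agrees, no Phi exists; on the first disagreement a Phi is created and the agreed value is back-filled for all predecessors seen so far:

    #include <cstdio>
    #include <vector>

    struct MergeSlot {
      int value = 0;          // value agreed on by predecessors seen so far
      bool is_phi = false;
      std::vector<int> phi_inputs;
    };

    void Merge(MergeSlot& slot, int unmerged, int predecessors_so_far,
               int predecessor_count) {
      if (!slot.is_phi && slot.value == unmerged) return;  // still agreeing
      if (!slot.is_phi) {
        // First disagreement: create a phi and back-fill the first value for
        // every predecessor seen so far, as MergeValue does above.
        slot.is_phi = true;
        slot.phi_inputs.assign(predecessor_count, 0);
        for (int i = 0; i < predecessors_so_far; i++)
          slot.phi_inputs[i] = slot.value;
      }
      slot.phi_inputs[predecessors_so_far] = unmerged;
    }

    int main() {
      MergeSlot slot;
      slot.value = 7;        // predecessor 0 contributed 7
      Merge(slot, 7, 1, 3);  // predecessor 1 agrees: still no phi
      printf("after pred 1: phi=%d\n", slot.is_phi);
      Merge(slot, 9, 2, 3);  // predecessor 2 differs: phi(7, 7, 9)
      for (int v : slot.phi_inputs) printf("input %d\n", v);
    }
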
@@ -297,8 +465,8 @@ class MergePointInterpreterFrameState {
result->set_input(0, unmerged);
}
- ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg, int merge_offset,
- ValueNode* initial_value) {
+ ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg,
+ int merge_offset) {
DCHECK_EQ(predecessors_so_far_, 1);
// Create a new loop phi, which for now is empty.
Phi* result = Node::New<Phi>(zone, predecessor_count_, reg, merge_offset);
@@ -308,89 +476,23 @@ class MergePointInterpreterFrameState {
phis_.Add(result);
return result;
}
- static int SizeFor(const MaglevCompilationUnit& info,
- const compiler::BytecodeLivenessState* liveness) {
- return info.parameter_count() + liveness->live_value_count();
- }
-
- template <typename Function>
- void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) const {
- for (int i = 0; i < info.parameter_count(); i++) {
- interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
- f(reg);
- }
- }
-
- template <typename Function>
- void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) {
- for (int i = 0; i < info.parameter_count(); i++) {
- interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
- f(reg);
- }
- }
-
- template <typename Function>
- void ForEachLocal(Function&& f) const {
- for (int register_index : *liveness_) {
- interpreter::Register reg = interpreter::Register(register_index);
- f(reg);
- }
- }
-
- template <typename Function>
- void ForEachLocal(Function&& f) {
- for (int register_index : *liveness_) {
- interpreter::Register reg = interpreter::Register(register_index);
- f(reg);
- }
- }
-
- template <typename Function>
- void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
- ForEachParameter(info, f);
- ForEachLocal(f);
- }
-
- template <typename Function>
- void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) const {
- ForEachParameter(info, f);
- ForEachLocal(f);
- }
-
- template <typename Function>
- void ForEachValue(const MaglevCompilationUnit& info, Function&& f) {
- int live_index = 0;
- ForEachRegister(info, [&](interpreter::Register reg) {
- f(reg, live_registers_and_accumulator_[live_index++]);
- });
- if (liveness_->AccumulatorIsLive()) {
- f(interpreter::Register::virtual_accumulator(),
- live_registers_and_accumulator_[live_index++]);
- live_index++;
- }
- DCHECK_EQ(live_index, SizeFor(info, liveness_));
- }
int predecessor_count_;
int predecessors_so_far_;
Phi::List phis_;
- ValueNode** live_registers_and_accumulator_;
- const compiler::BytecodeLivenessState* liveness_ = nullptr;
BasicBlock** predecessors_;
+ CompactInterpreterFrameState frame_state_;
MergePointRegisterState register_state_;
};
void InterpreterFrameState::CopyFrom(
const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state) {
- int live_index = 0;
- state.ForEachRegister(info, [&](interpreter::Register reg) {
- frame_[reg] = state.live_registers_and_accumulator_[live_index++];
- });
- if (state.liveness_->AccumulatorIsLive()) {
- accumulator_ = state.live_registers_and_accumulator_[live_index++];
- }
+ state.frame_state().ForEachValue(
+ info, [&](ValueNode* value, interpreter::Register reg) {
+ frame_[reg] = value;
+ });
}
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-ir.cc b/deps/v8/src/maglev/maglev-ir.cc
index 929a748330..c648ee581c 100644
--- a/deps/v8/src/maglev/maglev-ir.cc
+++ b/deps/v8/src/maglev/maglev-ir.cc
@@ -12,9 +12,11 @@
#include "src/compiler/backend/instruction.h"
#include "src/ic/handler-configuration.h"
#include "src/maglev/maglev-code-gen-state.h"
+#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
+#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-vreg-allocator.h"
namespace v8 {
@@ -32,11 +34,12 @@ const char* ToString(Opcode opcode) {
// TODO(v8:7700): Clean up after all code paths are supported.
static bool g_this_field_will_be_unused_once_all_code_paths_are_supported;
-#define UNSUPPORTED() \
- do { \
- std::cerr << "Maglev: Can't compile, unsupported codegen path.\n"; \
- code_gen_state->set_found_unsupported_code_paths(true); \
- g_this_field_will_be_unused_once_all_code_paths_are_supported = true; \
+#define UNSUPPORTED(REASON) \
+ do { \
+ std::cerr << "Maglev: Can't compile, unsupported codegen path (" REASON \
+ ")\n"; \
+ code_gen_state->set_found_unsupported_code_paths(true); \
+ g_this_field_will_be_unused_once_all_code_paths_are_supported = true; \
} while (false)
namespace {
@@ -63,10 +66,7 @@ void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
vreg_state->AllocateVirtualRegister());
}
-// TODO(victorgomes): Use this for smi binary operation and remove attribute
-// [[maybe_unused]].
-[[maybe_unused]] void DefineSameAsFirst(MaglevVregAllocationState* vreg_state,
- Node* node) {
+void DefineSameAsFirst(MaglevVregAllocationState* vreg_state, Node* node) {
node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
}
@@ -147,6 +147,10 @@ struct CopyForDeferredHelper<MaglevCompilationUnit*>
template <>
struct CopyForDeferredHelper<Register>
: public CopyForDeferredByValue<Register> {};
+// Bytecode offsets are copied by value.
+template <>
+struct CopyForDeferredHelper<BytecodeOffset>
+ : public CopyForDeferredByValue<BytecodeOffset> {};
// InterpreterFrameState is cloned.
template <>
@@ -158,6 +162,10 @@ struct CopyForDeferredHelper<const InterpreterFrameState*> {
*compilation_unit, *frame_state);
}
};
+// EagerDeoptInfo pointers are copied by value.
+template <>
+struct CopyForDeferredHelper<EagerDeoptInfo*>
+ : public CopyForDeferredByValue<EagerDeoptInfo*> {};
template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T&& value) {
@@ -196,7 +204,7 @@ struct StripFirstTwoTupleArgs<std::tuple<T1, T2, T...>> {
};
template <typename Function>
-class DeferredCodeInfoImpl final : public MaglevCodeGenState::DeferredCodeInfo {
+class DeferredCodeInfoImpl final : public DeferredCodeInfo {
public:
using FunctionPointer =
typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
@@ -252,64 +260,25 @@ void JumpToDeferredIf(Condition cond, MaglevCodeGenState* code_gen_state,
// Deopt
// ---
-void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
- int deopt_bytecode_position,
- const InterpreterFrameState* checkpoint_state) {
- DCHECK(node->properties().can_deopt());
- // TODO(leszeks): Extract to separate call, or at the very least defer.
-
- // TODO(leszeks): Stack check.
- MaglevCompilationUnit* compilation_unit = code_gen_state->compilation_unit();
- int maglev_frame_size = code_gen_state->vreg_slots();
-
- ASM_CODE_COMMENT_STRING(code_gen_state->masm(), "Deoptimize");
- __ RecordComment("Push registers and load accumulator");
- int num_saved_slots = 0;
- // TODO(verwaest): We probably shouldn't be spilling all values that go
- // through deopt :)
- for (int i = 0; i < compilation_unit->register_count(); ++i) {
- ValueNode* node = checkpoint_state->get(interpreter::Register(i));
- if (node == nullptr) continue;
- __ Push(ToMemOperand(node->spill_slot()));
- num_saved_slots++;
- }
- ValueNode* accumulator = checkpoint_state->accumulator();
- if (accumulator) {
- __ movq(kInterpreterAccumulatorRegister,
- ToMemOperand(accumulator->spill_slot()));
- }
-
- __ RecordComment("Load registers from extra pushed slots");
- int slot = 0;
- for (int i = 0; i < compilation_unit->register_count(); ++i) {
- ValueNode* node = checkpoint_state->get(interpreter::Register(i));
- if (node == nullptr) continue;
- __ movq(kScratchRegister, MemOperand(rsp, (num_saved_slots - slot++ - 1) *
- kSystemPointerSize));
- __ movq(MemOperand(rbp, InterpreterFrameConstants::kRegisterFileFromFp -
- i * kSystemPointerSize),
- kScratchRegister);
+void RegisterEagerDeopt(MaglevCodeGenState* code_gen_state,
+ EagerDeoptInfo* deopt_info) {
+ if (deopt_info->deopt_entry_label.is_unused()) {
+ code_gen_state->PushEagerDeopt(deopt_info);
}
- DCHECK_EQ(slot, num_saved_slots);
-
- __ RecordComment("Materialize bytecode array and offset");
- __ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp),
- compilation_unit->bytecode().object());
- __ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
- Smi::FromInt(deopt_bytecode_position +
- (BytecodeArray::kHeaderSize - kHeapObjectTag)));
+}
- // Reset rsp to bytecode sized frame.
- __ addq(rsp, Immediate((maglev_frame_size + num_saved_slots -
- (2 + compilation_unit->register_count())) *
- kSystemPointerSize));
- __ TailCallBuiltin(Builtin::kBaselineOrInterpreterEnterAtBytecode);
+void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
+ EagerDeoptInfo* deopt_info) {
+ RegisterEagerDeopt(code_gen_state, deopt_info);
+ __ RecordComment("-- Jump to eager deopt");
+ __ j(cond, &deopt_info->deopt_entry_label);
}
-void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
- const ProcessingState& state) {
- EmitDeopt(code_gen_state, node, state.checkpoint()->bytecode_position(),
- state.checkpoint_frame_state());
+template <typename NodeT>
+void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
+ NodeT* node) {
+ STATIC_ASSERT(NodeT::kProperties.can_eager_deopt());
+ EmitEagerDeoptIf(cond, code_gen_state, node->eager_deopt_info());
}
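
The registration step above deduplicates deopt entries: a node's EagerDeoptInfo is pushed onto the code-gen state only the first time a branch targets it, and every later branch reuses the same entry label. A toy sketch of that dedup, with a plain bool standing in for Label::is_unused():

    #include <cstdio>
    #include <vector>

    struct DeoptInfo {
      bool label_referenced = false;  // stands in for !Label::is_unused()
    };

    std::vector<DeoptInfo*> eager_deopts;  // stands in for the code-gen state

    void RegisterEagerDeopt(DeoptInfo* info) {
      if (!info->label_referenced) {
        info->label_referenced = true;
        eager_deopts.push_back(info);
      }
    }

    int main() {
      DeoptInfo d;
      RegisterEagerDeopt(&d);  // first conditional jump: registered
      RegisterEagerDeopt(&d);  // second jump to the same deopt: no duplicate
      printf("registered %zu entry/entries\n", eager_deopts.size());
    }
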
// ---
@@ -378,6 +347,20 @@ void NodeBase::Print(std::ostream& os,
UNREACHABLE();
}
+DeoptInfo::DeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState state)
+ : state(state),
+ input_locations(zone->NewArray<InputLocation>(
+ state.register_frame->size(compilation_unit))) {
+ // Default-initialize if we're printing the graph, to avoid printing junk
+ // values.
+ if (FLAG_print_maglev_graph) {
+ for (size_t i = 0; i < state.register_frame->size(compilation_unit); ++i) {
+ new (&input_locations[i]) InputLocation();
+ }
+ }
+}
+
// ---
// Nodes
// ---
@@ -394,29 +377,13 @@ void SmiConstant::PrintParams(std::ostream& os,
os << "(" << value() << ")";
}
-void Checkpoint::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {}
-void Checkpoint::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {}
-void Checkpoint::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << PrintNodeLabel(graph_labeller, accumulator()) << ")";
-}
-
-void SoftDeopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {}
-void SoftDeopt::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {
- EmitDeopt(code_gen_state, this, state);
-}
-
void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
}
void Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
- UNREACHABLE();
+ __ Move(ToRegister(result()), object_.object());
}
void Constant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
@@ -516,21 +483,20 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
__ Cmp(map_tmp, map().object());
// TODO(leszeks): Encode as a bit on CheckMaps.
- if (map().object()->is_migration_target()) {
+ if (map().is_migration_target()) {
JumpToDeferredIf(
not_equal, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label,
- Register object, CheckMaps* node, int checkpoint_position,
- const InterpreterFrameState* checkpoint_state_snapshot,
+ Register object, CheckMaps* node, EagerDeoptInfo* deopt_info,
Register map_tmp) {
- Label deopt;
+ RegisterEagerDeopt(code_gen_state, deopt_info);
// If the map is not deprecated, deopt straight away.
__ movl(kScratchRegister,
FieldOperand(map_tmp, Map::kBitField3Offset));
__ testl(kScratchRegister,
Immediate(Map::Bits3::IsDeprecatedBit::kMask));
- __ j(zero, &deopt);
+ __ j(zero, &deopt_info->deopt_entry_label);
// Otherwise, try migrating the object. If the migration returns Smi
// zero, then it failed and we should deopt.
@@ -540,25 +506,18 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
// TODO(verwaest): We're calling so we need to spill around it.
__ CallRuntime(Runtime::kTryMigrateInstance);
__ cmpl(kReturnRegister0, Immediate(0));
- __ j(equal, &deopt);
+ __ j(equal, &deopt_info->deopt_entry_label);
// The migrated object is returned on success, retry the map check.
__ Move(object, kReturnRegister0);
__ LoadMap(map_tmp, object);
__ Cmp(map_tmp, node->map().object());
__ j(equal, return_label);
-
- __ bind(&deopt);
- EmitDeopt(code_gen_state, node, checkpoint_position,
- checkpoint_state_snapshot);
+ __ jmp(&deopt_info->deopt_entry_label);
},
- object, this, state.checkpoint()->bytecode_position(),
- state.checkpoint_frame_state(), map_tmp);
+ object, this, eager_deopt_info(), map_tmp);
} else {
- Label is_ok;
- __ j(equal, &is_ok);
- EmitDeopt(code_gen_state, this, state);
- __ bind(&is_ok);
+ EmitEagerDeoptIf(not_equal, code_gen_state, this);
}
}
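
The deferred path above, in words: on a map mismatch, deopt immediately unless the expected map is a migration target and the object's current map is deprecated; in that case call Runtime::kTryMigrateInstance and retry the map check. A control-flow sketch with stand-in types (TryMigrateInstance here is a fake that always succeeds, unlike the runtime call):

    #include <cstdio>

    struct Obj {
      int map;
      bool map_deprecated;
    };

    // Stands in for Runtime::kTryMigrateInstance; this fake always succeeds.
    bool TryMigrateInstance(Obj& o, int new_map) {
      o.map = new_map;
      o.map_deprecated = false;
      return true;
    }

    // Returns true if execution may continue, false for an eager deopt.
    bool CheckMapsSlowPath(Obj& o, int expected_map) {
      if (!o.map_deprecated) return false;  // deopt straight away
      if (!TryMigrateInstance(o, expected_map)) return false;
      return o.map == expected_map;  // retry the map check after migration
    }

    int main() {
      Obj o{/*map=*/1, /*map_deprecated=*/true};
      printf("continue=%d\n", CheckMapsSlowPath(o, /*expected_map=*/2));
    }
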
void CheckMaps::PrintParams(std::ostream& os,
@@ -580,19 +539,28 @@ void LoadField::GenerateCode(MaglevCodeGenState* code_gen_state,
// LoadHandler::FieldIndexBits::decode(raw_handler);
Register object = ToRegister(object_input());
+ Register res = ToRegister(result());
int handler = this->handler();
if (LoadHandler::IsInobjectBits::decode(handler)) {
Operand input_field_operand = FieldOperand(
object, LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize);
- __ DecompressAnyTagged(ToRegister(result()), input_field_operand);
- if (LoadHandler::IsDoubleBits::decode(handler)) {
- // TODO(leszeks): Copy out the value, either as a double or a HeapNumber.
- UNSUPPORTED();
- }
+ __ DecompressAnyTagged(res, input_field_operand);
} else {
- // TODO(leszeks): Handle out-of-object properties.
- UNSUPPORTED();
+ Operand property_array_operand =
+ FieldOperand(object, JSReceiver::kPropertiesOrHashOffset);
+ __ DecompressAnyTagged(res, property_array_operand);
+
+ __ AssertNotSmi(res);
+
+ Operand input_field_operand = FieldOperand(
+ res, LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize);
+ __ DecompressAnyTagged(res, input_field_operand);
+ }
+
+ if (LoadHandler::IsDoubleBits::decode(handler)) {
+ // TODO(leszeks): Copy out the value, either as a double or a HeapNumber.
+ UNSUPPORTED("LoadField double property");
}
}
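
The handler decoded above is a Smi whose bitfields say whether the property is in-object and at which field index; out-of-object loads first fetch the property array from kPropertiesOrHashOffset. A sketch of the decode arithmetic with made-up bit positions (the real layout is LoadHandler's, not this):

    #include <cstdio>

    constexpr int kTaggedSize = 8;

    // Made-up layout: bit 0 = in-object flag, remaining bits = field index.
    bool IsInobject(int handler) { return handler & 1; }
    int FieldIndex(int handler) { return handler >> 1; }

    // Byte offset of the field, within the object itself or within its
    // property array, matching the FieldIndexBits * kTaggedSize above.
    int FieldByteOffset(int handler) { return FieldIndex(handler) * kTaggedSize; }

    int main() {
      int handler = (3 << 1) | 1;  // in-object, field index 3
      printf("inobject=%d offset=%d\n", IsInobject(handler),
             FieldByteOffset(handler));
    }
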
void LoadField::PrintParams(std::ostream& os,
@@ -617,7 +585,7 @@ void StoreField::GenerateCode(MaglevCodeGenState* code_gen_state,
__ StoreTaggedField(operand, value);
} else {
// TODO(victorgomes): Out-of-object properties.
- UNSUPPORTED();
+ UNSUPPORTED("StoreField out-of-object property");
}
}
@@ -628,37 +596,27 @@ void StoreField::PrintParams(std::ostream& os,
void LoadNamedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
- using D = LoadNoFeedbackDescriptor;
+ using D = LoadWithVectorDescriptor;
UseFixed(context(), kContextRegister);
UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void LoadNamedGeneric::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
- using D = LoadNoFeedbackDescriptor;
- const int ic_kind = static_cast<int>(FeedbackSlotKind::kLoadProperty);
+ using D = LoadWithVectorDescriptor;
DCHECK_EQ(ToRegister(context()), kContextRegister);
DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver));
__ Move(D::GetRegisterParameter(D::kName), name().object());
- __ Move(D::GetRegisterParameter(D::kICKind),
- Immediate(Smi::FromInt(ic_kind)));
- __ CallBuiltin(Builtin::kLoadIC_NoFeedback);
+ __ Move(D::GetRegisterParameter(D::kSlot),
+ Smi::FromInt(feedback().slot.ToInt()));
+ __ Move(D::GetRegisterParameter(D::kVector), feedback().vector);
+ __ CallBuiltin(Builtin::kLoadIC);
}
void LoadNamedGeneric::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << name_ << ")";
}
-void StoreToFrame::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {}
-void StoreToFrame::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {}
-void StoreToFrame::PrintParams(std::ostream& os,
- MaglevGraphLabeller* graph_labeller) const {
- os << "(" << target().ToString() << " ← "
- << PrintNodeLabel(graph_labeller, value()) << ")";
-}
-
void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UNREACHABLE();
@@ -753,6 +711,64 @@ void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
#undef DEF_OPERATION
+void CheckedSmiUntag::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(input());
+ DefineSameAsFirst(vreg_state, this);
+}
+
+void CheckedSmiUntag::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register value = ToRegister(input());
+ // TODO(leszeks): Consider optimizing away this test and using the carry bit
+ // of the `sarl` for cases where the deopt uses the value from a different
+ // register.
+ __ testb(value, Immediate(1));
+ EmitEagerDeoptIf(not_zero, code_gen_state, this);
+ __ sarl(value, Immediate(1));
+}
+
+void CheckedSmiTag::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(input());
+ DefineSameAsFirst(vreg_state, this);
+}
+
+void CheckedSmiTag::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input());
+ __ addl(reg, reg);
+ EmitEagerDeoptIf(overflow, code_gen_state, this);
+}
+
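For reference, the Smi arithmetic these two nodes implement on x64 (31-bit Smis with tag bit 0 in the low bit): tagging is `x + x` with an overflow check, untagging tests the low bit and arithmetic-shifts right by one. A compilable sketch using the GCC/Clang overflow builtin:

    #include <cstdint>
    #include <cstdio>

    bool CheckedSmiTag(int32_t value, int32_t* tagged) {
      // addl reg, reg; eager deopt on overflow.
      return !__builtin_add_overflow(value, value, tagged);
    }

    bool CheckedSmiUntag(int32_t tagged, int32_t* value) {
      // testb reg, 1; eager deopt if the low (non-Smi) bit is set.
      if (tagged & 1) return false;
      *value = tagged >> 1;  // sarl reg, 1
      return true;
    }

    int main() {
      int32_t t, v;
      printf("tag(42) ok=%d -> %d\n", CheckedSmiTag(42, &t), t);
      printf("untag ok=%d -> %d\n", CheckedSmiUntag(t, &v), v);
      printf("tag(INT32_MAX) ok=%d (overflow -> eager deopt)\n",
             CheckedSmiTag(INT32_MAX, &t));
    }
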
+void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ DefineAsRegister(vreg_state, this);
+}
+void Int32Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ __ Move(ToRegister(result()), Immediate(value()));
+}
+void Int32Constant::PrintParams(std::ostream& os,
+ MaglevGraphLabeller* graph_labeller) const {
+ os << "(" << value() << ")";
+}
+
+void Int32AddWithOverflow::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
+ UseRegister(left_input());
+ UseRegister(right_input());
+ DefineSameAsFirst(vreg_state, this);
+}
+
+void Int32AddWithOverflow::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ Register left = ToRegister(left_input());
+ Register right = ToRegister(right_input());
+ __ addl(left, right);
+ EmitEagerDeoptIf(overflow, code_gen_state, this);
+}
+
void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
// Phi inputs are processed in the post-process, once loop phis' inputs'
@@ -768,16 +784,14 @@ void Phi::AllocateVregInPostProcess(MaglevVregAllocationState* vreg_state) {
}
}
void Phi::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {
- DCHECK_EQ(state.interpreter_frame_state()->get(owner()), this);
-}
+ const ProcessingState& state) {}
void Phi::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << owner().ToString() << ")";
}
-void CallProperty::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {
+void Call::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {
UseFixed(function(), CallTrampolineDescriptor::GetRegisterParameter(
CallTrampolineDescriptor::kFunction));
UseFixed(context(), kContextRegister);
@@ -786,8 +800,8 @@ void CallProperty::AllocateVreg(MaglevVregAllocationState* vreg_state,
}
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
-void CallProperty::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {
+void Call::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
// TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.
DCHECK_EQ(ToRegister(function()),
@@ -806,16 +820,25 @@ void CallProperty::GenerateCode(MaglevCodeGenState* code_gen_state,
// TODO(leszeks): This doesn't collect feedback yet; one option is to pass
// in the feedback vector by Handle.
- __ CallBuiltin(Builtin::kCall_ReceiverIsNotNullOrUndefined);
-}
+ switch (receiver_mode_) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ __ CallBuiltin(Builtin::kCall_ReceiverIsNullOrUndefined);
+ break;
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ __ CallBuiltin(Builtin::kCall_ReceiverIsNotNullOrUndefined);
+ break;
+ case ConvertReceiverMode::kAny:
+ __ CallBuiltin(Builtin::kCall_ReceiverIsAny);
+ break;
+ }
-void CallUndefinedReceiver::AllocateVreg(MaglevVregAllocationState* vreg_state,
- const ProcessingState& state) {
- UNREACHABLE();
-}
-void CallUndefinedReceiver::GenerateCode(MaglevCodeGenState* code_gen_state,
- const ProcessingState& state) {
- UNREACHABLE();
+ lazy_deopt_info()->deopting_call_return_pc = __ pc_offset_for_safepoint();
+ code_gen_state->PushLazyDeopt(lazy_deopt_info());
+
+ SafepointTableBuilder::Safepoint safepoint =
+ code_gen_state->safepoint_table_builder()->DefineSafepoint(
+ code_gen_state->masm());
+ code_gen_state->DefineSafepointStackSlots(safepoint);
}
// ---
@@ -829,9 +852,42 @@ void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
+ // We're not going to continue execution, so we can use an arbitrary register
+ // here instead of relying on temporaries from the register allocator.
+ Register actual_params_size = r8;
+
+ // Compute the size of the actual parameters + receiver (in bytes).
+ // TODO(leszeks): Consider making this an input into Return to re-use the
+ // incoming argc's register (if it's still valid).
+ __ movq(actual_params_size,
+ MemOperand(rbp, StandardFrameConstants::kArgCOffset));
+
+ // Leave the frame.
+ // TODO(leszeks): Add a new frame marker for Maglev.
__ LeaveFrame(StackFrame::BASELINE);
+
+ // If the actual argument count is bigger than the formal parameter count,
+ // use the actual count to free up the stack arguments.
+ Label drop_dynamic_arg_size;
+ __ cmpq(actual_params_size, Immediate(code_gen_state->parameter_count()));
+ __ j(greater, &drop_dynamic_arg_size);
+
+ // Drop receiver + arguments according to static formal arguments size.
__ Ret(code_gen_state->parameter_count() * kSystemPointerSize,
kScratchRegister);
+
+ __ bind(&drop_dynamic_arg_size);
+ // Drop receiver + arguments according to dynamic arguments size.
+ __ DropArguments(actual_params_size, r9, TurboAssembler::kCountIsInteger,
+ TurboAssembler::kCountIncludesReceiver);
+ __ Ret();
+}
+
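The drop logic above amounts to freeing max(actual argc, formal parameter count) stack slots, where both counts include the receiver (kCountIncludesReceiver). A two-function sketch of the arithmetic:

    #include <algorithm>
    #include <cstdio>

    // Slots to pop on return; both counts include the receiver.
    int SlotsToDrop(int actual_params_size, int formal_param_count) {
      return std::max(actual_params_size, formal_param_count);
    }

    int main() {
      printf("%d\n", SlotsToDrop(/*actual=*/2, /*formal=*/4));  // 4: static
      printf("%d\n", SlotsToDrop(/*actual=*/6, /*formal=*/4));  // 6: dynamic
    }
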
+void Deopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
+ const ProcessingState& state) {}
+void Deopt::GenerateCode(MaglevCodeGenState* code_gen_state,
+ const ProcessingState& state) {
+ EmitEagerDeoptIf(always, code_gen_state, this);
}
void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state,
diff --git a/deps/v8/src/maglev/maglev-ir.h b/deps/v8/src/maglev/maglev-ir.h
index 398f9254d9..1f7c5471de 100644
--- a/deps/v8/src/maglev/maglev-ir.h
+++ b/deps/v8/src/maglev/maglev-ir.h
@@ -9,14 +9,17 @@
#include "src/base/macros.h"
#include "src/base/small-vector.h"
#include "src/base/threaded-list.h"
+#include "src/codegen/label.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"
#include "src/common/operation.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/heap-refs.h"
#include "src/interpreter/bytecode-register.h"
+#include "src/maglev/maglev-compilation-unit.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
+#include "src/utils/utils.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -26,8 +29,10 @@ namespace maglev {
class BasicBlock;
class ProcessingState;
class MaglevCodeGenState;
+class MaglevCompilationUnit;
class MaglevGraphLabeller;
class MaglevVregAllocationState;
+class CompactInterpreterFrameState;
// Nodes are either
// 1. side-effecting or value-holding SSA nodes in the body of basic blocks, or
@@ -61,8 +66,7 @@ class MaglevVregAllocationState;
V(GenericGreaterThanOrEqual)
#define VALUE_NODE_LIST(V) \
- V(CallProperty) \
- V(CallUndefinedReceiver) \
+ V(Call) \
V(Constant) \
V(InitialValue) \
V(LoadField) \
@@ -72,15 +76,16 @@ class MaglevVregAllocationState;
V(RegisterInput) \
V(RootConstant) \
V(SmiConstant) \
+ V(CheckedSmiTag) \
+ V(CheckedSmiUntag) \
+ V(Int32AddWithOverflow) \
+ V(Int32Constant) \
GENERIC_OPERATIONS_NODE_LIST(V)
#define NODE_LIST(V) \
- V(Checkpoint) \
V(CheckMaps) \
V(GapMove) \
- V(SoftDeopt) \
V(StoreField) \
- V(StoreToFrame) \
VALUE_NODE_LIST(V)
#define CONDITIONAL_CONTROL_NODE_LIST(V) \
@@ -93,6 +98,7 @@ class MaglevVregAllocationState;
#define CONTROL_NODE_LIST(V) \
V(Return) \
+ V(Deopt) \
CONDITIONAL_CONTROL_NODE_LIST(V) \
UNCONDITIONAL_CONTROL_NODE_LIST(V)
@@ -159,6 +165,11 @@ class ConditionalControlNode;
class UnconditionalControlNode;
class ValueNode;
+enum class ValueRepresentation {
+ kTagged,
+ kUntagged,
+};
+
#define DEF_FORWARD_DECLARATION(type, ...) class type;
NODE_BASE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
@@ -168,16 +179,26 @@ static constexpr uint32_t kInvalidNodeId = 0;
class OpProperties {
public:
- bool is_call() const { return kIsCallBit::decode(bitfield_); }
- bool can_deopt() const { return kCanDeoptBit::decode(bitfield_); }
- bool can_read() const { return kCanReadBit::decode(bitfield_); }
- bool can_write() const { return kCanWriteBit::decode(bitfield_); }
- bool non_memory_side_effects() const {
+ constexpr bool is_call() const { return kIsCallBit::decode(bitfield_); }
+ constexpr bool can_eager_deopt() const {
+ return kCanEagerDeoptBit::decode(bitfield_);
+ }
+ constexpr bool can_lazy_deopt() const {
+ return kCanLazyDeoptBit::decode(bitfield_);
+ }
+ constexpr bool can_read() const { return kCanReadBit::decode(bitfield_); }
+ constexpr bool can_write() const { return kCanWriteBit::decode(bitfield_); }
+ constexpr bool non_memory_side_effects() const {
return kNonMemorySideEffectsBit::decode(bitfield_);
}
+ constexpr bool is_untagged_value() const {
+ return kUntaggedValueBit::decode(bitfield_);
+ }
- bool is_pure() const { return (bitfield_ | kPureMask) == kPureValue; }
- bool is_required_when_unused() const {
+ constexpr bool is_pure() const {
+ return (bitfield_ & kPureMask) == kPureValue;
+ }
+ constexpr bool is_required_when_unused() const {
return can_write() || non_memory_side_effects();
}
@@ -189,8 +210,11 @@ class OpProperties {
static constexpr OpProperties Call() {
return OpProperties(kIsCallBit::encode(true));
}
- static constexpr OpProperties Deopt() {
- return OpProperties(kCanDeoptBit::encode(true));
+ static constexpr OpProperties EagerDeopt() {
+ return OpProperties(kCanEagerDeoptBit::encode(true));
+ }
+ static constexpr OpProperties LazyDeopt() {
+ return OpProperties(kCanLazyDeoptBit::encode(true));
}
static constexpr OpProperties Reading() {
return OpProperties(kCanReadBit::encode(true));
@@ -201,16 +225,27 @@ class OpProperties {
static constexpr OpProperties NonMemorySideEffects() {
return OpProperties(kNonMemorySideEffectsBit::encode(true));
}
+ static constexpr OpProperties UntaggedValue() {
+ return OpProperties(kUntaggedValueBit::encode(true));
+ }
+ static constexpr OpProperties JSCall() {
+ return Call() | NonMemorySideEffects() | LazyDeopt();
+ }
static constexpr OpProperties AnySideEffects() {
return Reading() | Writing() | NonMemorySideEffects();
}
+ constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {}
+ operator uint32_t() const { return bitfield_; }
+
private:
using kIsCallBit = base::BitField<bool, 0, 1>;
- using kCanDeoptBit = kIsCallBit::Next<bool, 1>;
- using kCanReadBit = kCanDeoptBit::Next<bool, 1>;
+ using kCanEagerDeoptBit = kIsCallBit::Next<bool, 1>;
+ using kCanLazyDeoptBit = kCanEagerDeoptBit::Next<bool, 1>;
+ using kCanReadBit = kCanLazyDeoptBit::Next<bool, 1>;
using kCanWriteBit = kCanReadBit::Next<bool, 1>;
using kNonMemorySideEffectsBit = kCanWriteBit::Next<bool, 1>;
+ using kUntaggedValueBit = kNonMemorySideEffectsBit::Next<bool, 1>;
static const uint32_t kPureMask = kCanReadBit::kMask | kCanWriteBit::kMask |
kNonMemorySideEffectsBit::kMask;
@@ -218,9 +253,10 @@ class OpProperties {
kCanWriteBit::encode(false) |
kNonMemorySideEffectsBit::encode(false);
- constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {}
+ const uint32_t bitfield_;
- uint32_t bitfield_;
+ public:
+ static const size_t kSize = kUntaggedValueBit::kLastUsedBit + 1;
};
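
A standalone sketch of this encoding style: one bit per property, constexpr factories composed with `|`, decoded by masking (plain shifts stand in for base::BitField, and only a few of the properties are modeled):

    #include <cstdint>
    #include <cstdio>

    class Props {
     public:
      static constexpr Props Call() { return Props(1u << 0); }
      static constexpr Props EagerDeopt() { return Props(1u << 1); }
      static constexpr Props LazyDeopt() { return Props(1u << 2); }
      static constexpr Props NonMemorySideEffects() { return Props(1u << 4); }
      // JSCall is a composite, exactly as in the class above.
      static constexpr Props JSCall() {
        return Call() | NonMemorySideEffects() | LazyDeopt();
      }
      constexpr bool can_lazy_deopt() const { return bits_ & (1u << 2); }
      constexpr Props operator|(Props o) const { return Props(bits_ | o.bits_); }
      constexpr explicit Props(uint32_t bits) : bits_(bits) {}
      constexpr operator uint32_t() const { return bits_; }

     private:
      uint32_t bits_;
    };

    int main() {
      constexpr Props p = Props::JSCall();
      static_assert(p.can_lazy_deopt(), "JSCall implies lazy deopt");
      printf("bits=0x%x\n", static_cast<uint32_t>(p));
    }
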
class ValueLocation {
@@ -263,22 +299,66 @@ class ValueLocation {
compiler::InstructionOperand operand_;
};
-class Input : public ValueLocation {
+class InputLocation : public ValueLocation {
public:
- explicit Input(ValueNode* node) : node_(node) {}
-
- ValueNode* node() const { return node_; }
-
NodeIdT next_use_id() const { return next_use_id_; }
-
// Used in ValueNode::mark_use
NodeIdT* get_next_use_id_address() { return &next_use_id_; }
private:
- ValueNode* node_;
NodeIdT next_use_id_ = kInvalidNodeId;
};
+class Input : public InputLocation {
+ public:
+ explicit Input(ValueNode* node) : node_(node) {}
+ ValueNode* node() const { return node_; }
+
+ private:
+ ValueNode* node_;
+};
+
+class CheckpointedInterpreterState {
+ public:
+ CheckpointedInterpreterState() = default;
+ CheckpointedInterpreterState(BytecodeOffset bytecode_position,
+ const CompactInterpreterFrameState* state)
+ : bytecode_position(bytecode_position), register_frame(state) {}
+
+ BytecodeOffset bytecode_position = BytecodeOffset::None();
+ const CompactInterpreterFrameState* register_frame = nullptr;
+};
+
+class DeoptInfo {
+ protected:
+ DeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState checkpoint);
+
+ public:
+ CheckpointedInterpreterState state;
+ InputLocation* input_locations = nullptr;
+ Label deopt_entry_label;
+ int deopt_index = -1;
+};
+
+class EagerDeoptInfo : public DeoptInfo {
+ public:
+ EagerDeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState checkpoint)
+ : DeoptInfo(zone, compilation_unit, checkpoint) {}
+};
+
+class LazyDeoptInfo : public DeoptInfo {
+ public:
+ LazyDeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState checkpoint)
+ : DeoptInfo(zone, compilation_unit, checkpoint) {}
+
+ int deopting_call_return_pc = -1;
+ interpreter::Register result_location =
+ interpreter::Register::invalid_value();
+};
+
// Dummy type for the initial raw allocation.
struct NodeWithInlineInputs {};
@@ -296,10 +376,23 @@ struct opcode_of_helper;
};
NODE_BASE_LIST(DEF_OPCODE_OF)
#undef DEF_OPCODE_OF
+
} // namespace detail
class NodeBase : public ZoneObject {
+ private:
+ // Bitfield specification.
+ using OpcodeField = base::BitField<Opcode, 0, 6>;
+ STATIC_ASSERT(OpcodeField::is_valid(kLastOpcode));
+ using OpPropertiesField =
+ OpcodeField::Next<OpProperties, OpProperties::kSize>;
+ using InputCountField = OpPropertiesField::Next<uint16_t, 16>;
+
protected:
+ // Subclasses may use the remaining bitfield bits.
+ template <class T, int size>
+ using NextBitField = InputCountField::Next<T, size>;
+
template <class T>
static constexpr Opcode opcode_of = detail::opcode_of_helper<T>::value;
@@ -319,6 +412,21 @@ class NodeBase : public ZoneObject {
return node;
}
+ template <class Derived, typename... Args>
+ static Derived* New(Zone* zone, const MaglevCompilationUnit& compilation_unit,
+ CheckpointedInterpreterState checkpoint, Args&&... args) {
+ Derived* node = New<Derived>(zone, std::forward<Args>(args)...);
+ if constexpr (Derived::kProperties.can_eager_deopt()) {
+ new (node->eager_deopt_info_address())
+ EagerDeoptInfo(zone, compilation_unit, checkpoint);
+ } else {
+ STATIC_ASSERT(Derived::kProperties.can_lazy_deopt());
+ new (node->lazy_deopt_info_address())
+ LazyDeoptInfo(zone, compilation_unit, checkpoint);
+ }
+ return node;
+ }
+
// Inputs must be initialized manually.
template <class Derived, typename... Args>
static Derived* New(Zone* zone, size_t input_count, Args&&... args) {
@@ -329,9 +437,11 @@ class NodeBase : public ZoneObject {
// Overwritten by subclasses.
static constexpr OpProperties kProperties = OpProperties::Pure();
- inline const OpProperties& properties() const;
constexpr Opcode opcode() const { return OpcodeField::decode(bit_field_); }
+ OpProperties properties() const {
+ return OpPropertiesField::decode(bit_field_);
+ }
template <class T>
constexpr bool Is() const;
@@ -407,15 +517,45 @@ class NodeBase : public ZoneObject {
void Print(std::ostream& os, MaglevGraphLabeller*) const;
+ EagerDeoptInfo* eager_deopt_info() {
+ DCHECK(properties().can_eager_deopt());
+ DCHECK(!properties().can_lazy_deopt());
+ return (
+ reinterpret_cast<EagerDeoptInfo*>(input_address(input_count() - 1)) -
+ 1);
+ }
+
+ const EagerDeoptInfo* eager_deopt_info() const {
+ DCHECK(properties().can_eager_deopt());
+ DCHECK(!properties().can_lazy_deopt());
+ return (reinterpret_cast<const EagerDeoptInfo*>(
+ input_address(input_count() - 1)) -
+ 1);
+ }
+
+ LazyDeoptInfo* lazy_deopt_info() {
+ DCHECK(properties().can_lazy_deopt());
+ DCHECK(!properties().can_eager_deopt());
+ return (reinterpret_cast<LazyDeoptInfo*>(input_address(input_count() - 1)) -
+ 1);
+ }
+
+ const LazyDeoptInfo* lazy_deopt_info() const {
+ DCHECK(properties().can_lazy_deopt());
+ DCHECK(!properties().can_eager_deopt());
+ return (reinterpret_cast<const LazyDeoptInfo*>(
+ input_address(input_count() - 1)) -
+ 1);
+ }
+
protected:
- NodeBase(Opcode opcode, size_t input_count)
- : bit_field_(OpcodeField::encode(opcode) |
- InputCountField::encode(input_count)) {}
+ explicit NodeBase(uint32_t bitfield) : bit_field_(bitfield) {}
Input* input_address(int index) {
DCHECK_LT(index, input_count());
return reinterpret_cast<Input*>(this) - (index + 1);
}
+
const Input* input_address(int index) const {
DCHECK_LT(index, input_count());
return reinterpret_cast<const Input*>(this) - (index + 1);
@@ -425,36 +565,54 @@ class NodeBase : public ZoneObject {
new (input_address(index)) Input(input);
}
+ void set_temporaries_needed(int value) {
+#ifdef DEBUG
+ DCHECK_EQ(kTemporariesState, kUnset);
+ kTemporariesState = kNeedsTemporaries;
+#endif // DEBUG
+ num_temporaries_needed_ = value;
+ }
+
+ EagerDeoptInfo* eager_deopt_info_address() {
+ DCHECK(properties().can_eager_deopt());
+ DCHECK(!properties().can_lazy_deopt());
+ return reinterpret_cast<EagerDeoptInfo*>(input_address(input_count() - 1)) -
+ 1;
+ }
+
+ LazyDeoptInfo* lazy_deopt_info_address() {
+ DCHECK(!properties().can_eager_deopt());
+ DCHECK(properties().can_lazy_deopt());
+ return reinterpret_cast<LazyDeoptInfo*>(input_address(input_count() - 1)) -
+ 1;
+ }
+
private:
template <class Derived, typename... Args>
static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) {
- const size_t size = sizeof(Derived) + input_count * sizeof(Input);
+ static_assert(
+ !Derived::kProperties.can_eager_deopt() ||
+ !Derived::kProperties.can_lazy_deopt(),
+ "The current deopt info representation, at the end of inputs, requires "
+ "that we cannot have both lazy and eager deopts on a node. If we ever "
+ "need this, we have to update accessors to check node->properties() "
+ "for which deopts are active.");
+ const size_t size_before_node =
+ input_count * sizeof(Input) +
+ (Derived::kProperties.can_eager_deopt() ? sizeof(EagerDeoptInfo) : 0) +
+ (Derived::kProperties.can_lazy_deopt() ? sizeof(LazyDeoptInfo) : 0);
+ const size_t size = size_before_node + sizeof(Derived);
intptr_t raw_buffer =
reinterpret_cast<intptr_t>(zone->Allocate<NodeWithInlineInputs>(size));
- void* node_buffer =
- reinterpret_cast<void*>(raw_buffer + input_count * sizeof(Input));
+ void* node_buffer = reinterpret_cast<void*>(raw_buffer + size_before_node);
+ uint32_t bitfield = OpcodeField::encode(opcode_of<Derived>) |
+ OpPropertiesField::encode(Derived::kProperties) |
+ InputCountField::encode(input_count);
Derived* node =
- new (node_buffer) Derived(input_count, std::forward<Args>(args)...);
+ new (node_buffer) Derived(bitfield, std::forward<Args>(args)...);
return node;
}
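
The layout Allocate() builds puts the variable-size parts (inputs, plus any deopt info) at lower addresses than the node object itself, which is why input_address() subtracts from `this`. A compilable miniature of the same trick, with stand-in types and malloc instead of a zone allocator:

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    struct Input { int value; };

    struct Node {
      Input* input_address(int index) {
        // Input 0 sits immediately before the node, input 1 before that, etc.
        return reinterpret_cast<Input*>(this) - (index + 1);
      }
      int id;
    };

    int main() {
      const int input_count = 2;
      const size_t size_before_node = input_count * sizeof(Input);
      char* raw =
          static_cast<char*>(std::malloc(size_before_node + sizeof(Node)));
      Node* node = new (raw + size_before_node) Node{42};
      new (node->input_address(0)) Input{10};  // written backwards from node
      new (node->input_address(1)) Input{20};
      printf("node %d: inputs %d, %d\n", node->id,
             node->input_address(0)->value, node->input_address(1)->value);
      std::free(raw);
    }
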
- protected:
- // Bitfield specification.
- using OpcodeField = base::BitField<Opcode, 0, 6>;
- STATIC_ASSERT(OpcodeField::is_valid(kLastOpcode));
- using InputCountField = OpcodeField::Next<uint16_t, 16>;
- // Subclasses may use the remaining bits.
- template <class T, int size>
- using NextBitField = InputCountField::Next<T, size>;
-
- void set_temporaries_needed(int value) {
-#ifdef DEBUG
- DCHECK_EQ(kTemporariesState, kUnset);
- kTemporariesState = kNeedsTemporaries;
-#endif // DEBUG
- num_temporaries_needed_ = value;
- }
-
uint32_t bit_field_;
private:
@@ -505,13 +663,24 @@ class Node : public NodeBase {
inline ValueLocation& result();
+ // This might break ThreadedList invariants.
+ // Run ThreadedList::RevalidateTail afterwards.
+ void AddNodeAfter(Node* node) {
+ DCHECK_NOT_NULL(node);
+ DCHECK_NULL(node->next_);
+ node->next_ = next_;
+ next_ = node;
+ }
+
+ Node* NextNode() const { return next_; }
+
protected:
- explicit Node(Opcode opcode, size_t input_count)
- : NodeBase(opcode, input_count) {}
+ using NodeBase::NodeBase;
private:
Node** next() { return &next_; }
Node* next_ = nullptr;
+
friend List;
friend base::ThreadedListTraits<Node>;
};
@@ -559,16 +728,14 @@ class ValueNode : public Node {
return compiler::AllocatedOperand::cast(spill_or_hint_);
}
- void mark_use(NodeIdT id, Input* use) {
+ void mark_use(NodeIdT id, InputLocation* input_location) {
DCHECK_EQ(state_, kLastUse);
DCHECK_NE(id, kInvalidNodeId);
DCHECK_LT(start_id(), id);
DCHECK_IMPLIES(has_valid_live_range(), id >= end_id_);
end_id_ = id;
*last_uses_next_use_id_ = id;
- if (use) {
- last_uses_next_use_id_ = use->get_next_use_id_address();
- }
+ last_uses_next_use_id_ = input_location->get_next_use_id_address();
}
struct LiveRange {
@@ -606,9 +773,16 @@ class ValueNode : public Node {
return compiler::AllocatedOperand::cast(spill_or_hint_);
}
+ bool is_untagged_value() const { return properties().is_untagged_value(); }
+
+ ValueRepresentation value_representation() const {
+ return is_untagged_value() ? ValueRepresentation::kUntagged
+ : ValueRepresentation::kTagged;
+ }
+
protected:
- explicit ValueNode(Opcode opcode, size_t input_count)
- : Node(opcode, input_count),
+ explicit ValueNode(uint32_t bitfield)
+ : Node(bitfield),
last_uses_next_use_id_(&next_use_)
#ifdef DEBUG
,
@@ -647,11 +821,13 @@ class NodeT : public Node {
STATIC_ASSERT(!IsValueNode(opcode_of<Derived>));
public:
- constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ constexpr Opcode opcode() const { return opcode_of<Derived>; }
const OpProperties& properties() const { return Derived::kProperties; }
protected:
- explicit NodeT(size_t input_count) : Node(opcode_of<Derived>, input_count) {}
+ explicit NodeT(uint32_t bitfield) : Node(bitfield) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ }
};
template <size_t InputCount, class Derived>
@@ -667,9 +843,8 @@ class FixedInputNodeT : public NodeT<Derived> {
}
protected:
- explicit FixedInputNodeT(size_t input_count) : NodeT<Derived>(kInputCount) {
- DCHECK_EQ(input_count, kInputCount);
- USE(input_count);
+ explicit FixedInputNodeT(uint32_t bitfield) : NodeT<Derived>(bitfield) {
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
};
@@ -678,12 +853,13 @@ class ValueNodeT : public ValueNode {
STATIC_ASSERT(IsValueNode(opcode_of<Derived>));
public:
- constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
+ constexpr Opcode opcode() const { return opcode_of<Derived>; }
const OpProperties& properties() const { return Derived::kProperties; }
protected:
- explicit ValueNodeT(size_t input_count)
- : ValueNode(opcode_of<Derived>, input_count) {}
+ explicit ValueNodeT(uint32_t bitfield) : ValueNode(bitfield) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ }
};
template <size_t InputCount, class Derived>
@@ -699,10 +875,9 @@ class FixedInputValueNodeT : public ValueNodeT<Derived> {
}
protected:
- explicit FixedInputValueNodeT(size_t input_count)
- : ValueNodeT<Derived>(InputCount) {
- DCHECK_EQ(input_count, InputCount);
- USE(input_count);
+ explicit FixedInputValueNodeT(uint32_t bitfield)
+ : ValueNodeT<Derived>(bitfield) {
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
};
@@ -712,16 +887,16 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
public:
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
static constexpr int kOperandIndex = 0;
Input& operand_input() { return Node::input(kOperandIndex); }
compiler::FeedbackSource feedback() const { return feedback_; }
protected:
- explicit UnaryWithFeedbackNode(size_t input_count,
+ explicit UnaryWithFeedbackNode(uint32_t bitfield,
const compiler::FeedbackSource& feedback)
- : Base(input_count), feedback_(feedback) {}
+ : Base(bitfield), feedback_(feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -736,7 +911,7 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
public:
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
@@ -745,9 +920,9 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
compiler::FeedbackSource feedback() const { return feedback_; }
protected:
- BinaryWithFeedbackNode(size_t input_count,
+ BinaryWithFeedbackNode(uint32_t bitfield,
const compiler::FeedbackSource& feedback)
- : Base(input_count), feedback_(feedback) {}
+ : Base(bitfield), feedback_(feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -761,8 +936,8 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
using Base = Super<Name, Operation::k##OpName>; \
\
public: \
- Name(size_t input_count, const compiler::FeedbackSource& feedback) \
- : Base(input_count, feedback) {} \
+ Name(uint32_t bitfield, const compiler::FeedbackSource& feedback) \
+ : Base(bitfield, feedback) {} \
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); \
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
@@ -778,12 +953,82 @@ COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
#undef DEF_UNARY_WITH_FEEDBACK_NODE
#undef DEF_BINARY_WITH_FEEDBACK_NODE
+class CheckedSmiTag : public FixedInputValueNodeT<1, CheckedSmiTag> {
+ using Base = FixedInputValueNodeT<1, CheckedSmiTag>;
+
+ public:
+ explicit CheckedSmiTag(uint32_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+
+ Input& input() { return Node::input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class CheckedSmiUntag : public FixedInputValueNodeT<1, CheckedSmiUntag> {
+ using Base = FixedInputValueNodeT<1, CheckedSmiUntag>;
+
+ public:
+ explicit CheckedSmiUntag(uint32_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::UntaggedValue();
+
+ Input& input() { return Node::input(0); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
+class Int32Constant : public FixedInputValueNodeT<0, Int32Constant> {
+ using Base = FixedInputValueNodeT<0, Int32Constant>;
+
+ public:
+ explicit Int32Constant(uint32_t bitfield, int32_t value)
+ : Base(bitfield), value_(value) {}
+
+ static constexpr OpProperties kProperties = OpProperties::UntaggedValue();
+
+ int32_t value() const { return value_; }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+ const int32_t value_;
+};
+
+class Int32AddWithOverflow
+ : public FixedInputValueNodeT<2, Int32AddWithOverflow> {
+ using Base = FixedInputValueNodeT<2, Int32AddWithOverflow>;
+
+ public:
+ explicit Int32AddWithOverflow(uint32_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::UntaggedValue();
+
+ static constexpr int kLeftIndex = 0;
+ static constexpr int kRightIndex = 1;
+ Input& left_input() { return Node::input(kLeftIndex); }
+ Input& right_input() { return Node::input(kRightIndex); }
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
using Base = FixedInputValueNodeT<0, InitialValue>;
public:
- explicit InitialValue(size_t input_count, interpreter::Register source)
- : Base(input_count), source_(source) {}
+ explicit InitialValue(uint32_t bitfield, interpreter::Register source)
+ : Base(bitfield), source_(source) {}
interpreter::Register source() const { return source_; }
@@ -799,8 +1044,8 @@ class RegisterInput : public FixedInputValueNodeT<0, RegisterInput> {
using Base = FixedInputValueNodeT<0, RegisterInput>;
public:
- explicit RegisterInput(size_t input_count, Register input)
- : Base(input_count), input_(input) {}
+ explicit RegisterInput(uint32_t bitfield, Register input)
+ : Base(bitfield), input_(input) {}
Register input() const { return input_; }
@@ -816,8 +1061,8 @@ class SmiConstant : public FixedInputValueNodeT<0, SmiConstant> {
using Base = FixedInputValueNodeT<0, SmiConstant>;
public:
- explicit SmiConstant(size_t input_count, Smi value)
- : Base(input_count), value_(value) {}
+ explicit SmiConstant(uint32_t bitfield, Smi value)
+ : Base(bitfield), value_(value) {}
Smi value() const { return value_; }
@@ -833,8 +1078,8 @@ class Constant : public FixedInputValueNodeT<0, Constant> {
using Base = FixedInputValueNodeT<0, Constant>;
public:
- explicit Constant(size_t input_count, const compiler::HeapObjectRef& object)
- : Base(input_count), object_(object) {}
+ explicit Constant(uint32_t bitfield, const compiler::HeapObjectRef& object)
+ : Base(bitfield), object_(object) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -848,8 +1093,8 @@ class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
using Base = FixedInputValueNodeT<0, RootConstant>;
public:
- explicit RootConstant(size_t input_count, RootIndex index)
- : Base(input_count), index_(index) {}
+ explicit RootConstant(uint32_t bitfield, RootIndex index)
+ : Base(bitfield), index_(index) {}
RootIndex index() const { return index_; }
@@ -861,57 +1106,18 @@ class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
const RootIndex index_;
};
-class Checkpoint : public FixedInputNodeT<0, Checkpoint> {
- using Base = FixedInputNodeT<0, Checkpoint>;
-
- public:
- explicit Checkpoint(size_t input_count, int bytecode_position,
- bool accumulator_is_live, ValueNode* accumulator)
- : Base(input_count),
- bytecode_position_(bytecode_position),
- accumulator_(accumulator_is_live ? accumulator : nullptr) {}
-
- int bytecode_position() const { return bytecode_position_; }
- bool is_used() const { return IsUsedBit::decode(bit_field_); }
- void SetUsed() { bit_field_ = IsUsedBit::update(bit_field_, true); }
- ValueNode* accumulator() const { return accumulator_; }
-
- void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
- void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
-
- private:
- using IsUsedBit = NextBitField<bool, 1>;
-
- const int bytecode_position_;
- ValueNode* const accumulator_;
-};
-
-class SoftDeopt : public FixedInputNodeT<0, SoftDeopt> {
- using Base = FixedInputNodeT<0, SoftDeopt>;
-
- public:
- explicit SoftDeopt(size_t input_count) : Base(input_count) {}
-
- static constexpr OpProperties kProperties = OpProperties::Deopt();
-
- void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
- void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
-};
-
class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
using Base = FixedInputNodeT<1, CheckMaps>;
public:
- explicit CheckMaps(size_t input_count, const compiler::MapRef& map)
- : Base(input_count), map_(map) {}
+ explicit CheckMaps(uint32_t bitfield, const compiler::MapRef& map)
+ : Base(bitfield), map_(map) {}
// TODO(verwaest): This just calls in deferred code, so probably we'll need to
// mark that to generate stack maps. Mark as call so we at least clear the
// registers since we currently don't properly spill either.
static constexpr OpProperties kProperties =
- OpProperties::Deopt() | OpProperties::Call();
+ OpProperties::EagerDeopt() | OpProperties::Call();
compiler::MapRef map() const { return map_; }
@@ -930,11 +1136,10 @@ class LoadField : public FixedInputValueNodeT<1, LoadField> {
using Base = FixedInputValueNodeT<1, LoadField>;
public:
- explicit LoadField(size_t input_count, int handler)
- : Base(input_count), handler_(handler) {}
+ explicit LoadField(uint32_t bitfield, int handler)
+ : Base(bitfield), handler_(handler) {}
- // The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::Reading();
int handler() const { return handler_; }
@@ -953,8 +1158,10 @@ class StoreField : public FixedInputNodeT<2, StoreField> {
using Base = FixedInputNodeT<2, StoreField>;
public:
- explicit StoreField(size_t input_count, int handler)
- : Base(input_count), handler_(handler) {}
+ explicit StoreField(uint32_t bitfield, int handler)
+ : Base(bitfield), handler_(handler) {}
+
+ static constexpr OpProperties kProperties = OpProperties::Writing();
int handler() const { return handler_; }
@@ -975,11 +1182,11 @@ class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> {
using Base = FixedInputValueNodeT<1, LoadGlobal>;
public:
- explicit LoadGlobal(size_t input_count, const compiler::NameRef& name)
- : Base(input_count), name_(name) {}
+ explicit LoadGlobal(uint32_t bitfield, const compiler::NameRef& name)
+ : Base(bitfield), name_(name) {}
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
Input& context() { return input(0); }
const compiler::NameRef& name() const { return name_; }
@@ -996,13 +1203,15 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
using Base = FixedInputValueNodeT<2, LoadNamedGeneric>;
public:
- explicit LoadNamedGeneric(size_t input_count, const compiler::NameRef& name)
- : Base(input_count), name_(name) {}
+ explicit LoadNamedGeneric(uint32_t bitfield, const compiler::NameRef& name,
+ const compiler::FeedbackSource& feedback)
+ : Base(bitfield), name_(name), feedback_(feedback) {}
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
compiler::NameRef name() const { return name_; }
+ compiler::FeedbackSource feedback() const { return feedback_; }
static constexpr int kContextIndex = 0;
static constexpr int kObjectIndex = 1;
@@ -1015,35 +1224,16 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
private:
const compiler::NameRef name_;
-};
-
-class StoreToFrame : public FixedInputNodeT<0, StoreToFrame> {
- using Base = FixedInputNodeT<0, StoreToFrame>;
-
- public:
- StoreToFrame(size_t input_count, ValueNode* value,
- interpreter::Register target)
- : Base(input_count), value_(value), target_(target) {}
-
- interpreter::Register target() const { return target_; }
- ValueNode* value() const { return value_; }
-
- void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
- void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
-
- private:
- ValueNode* const value_;
- const interpreter::Register target_;
+ const compiler::FeedbackSource feedback_;
};
class GapMove : public FixedInputNodeT<0, GapMove> {
using Base = FixedInputNodeT<0, GapMove>;
public:
- GapMove(size_t input_count, compiler::AllocatedOperand source,
+ GapMove(uint32_t bitfield, compiler::AllocatedOperand source,
compiler::AllocatedOperand target)
- : Base(input_count), source_(source), target_(target) {}
+ : Base(bitfield), source_(source), target_(target) {}
compiler::AllocatedOperand source() const { return source_; }
compiler::AllocatedOperand target() const { return target_; }
@@ -1067,8 +1257,8 @@ class Phi : public ValueNodeT<Phi> {
using List = base::ThreadedList<Phi>;
// TODO(jgruber): More intuitive constructors, if possible.
- Phi(size_t input_count, interpreter::Register owner, int merge_offset)
- : Base(input_count), owner_(owner), merge_offset_(merge_offset) {}
+ Phi(uint32_t bitfield, interpreter::Register owner, int merge_offset)
+ : Base(bitfield), owner_(owner), merge_offset_(merge_offset) {}
interpreter::Register owner() const { return owner_; }
int merge_offset() const { return merge_offset_; }
@@ -1090,54 +1280,42 @@ class Phi : public ValueNodeT<Phi> {
friend base::ThreadedListTraits<Phi>;
};
-class CallProperty : public ValueNodeT<CallProperty> {
- using Base = ValueNodeT<CallProperty>;
+class Call : public ValueNodeT<Call> {
+ using Base = ValueNodeT<Call>;
public:
- explicit CallProperty(size_t input_count) : Base(input_count) {}
+ // We assume function and context as fixed inputs.
+ static constexpr int kFunctionIndex = 0;
+ static constexpr int kContextIndex = 1;
+ static constexpr int kFixedInputCount = 2;
// This ctor is used for variable input counts.
// Inputs must be initialized manually.
- CallProperty(size_t input_count, ValueNode* function, ValueNode* context)
- : Base(input_count) {
- set_input(0, function);
- set_input(1, context);
+ Call(uint32_t bitfield, ConvertReceiverMode mode, ValueNode* function,
+ ValueNode* context)
+ : Base(bitfield), receiver_mode_(mode) {
+ set_input(kFunctionIndex, function);
+ set_input(kContextIndex, context);
}
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties = OpProperties::JSCall();
- Input& function() { return input(0); }
- const Input& function() const { return input(0); }
- Input& context() { return input(1); }
- const Input& context() const { return input(1); }
- int num_args() const { return input_count() - 2; }
- Input& arg(int i) { return input(i + 2); }
- void set_arg(int i, ValueNode* node) { set_input(i + 2, node); }
+ Input& function() { return input(kFunctionIndex); }
+ const Input& function() const { return input(kFunctionIndex); }
+ Input& context() { return input(kContextIndex); }
+ const Input& context() const { return input(kContextIndex); }
+ int num_args() const { return input_count() - kFixedInputCount; }
+ Input& arg(int i) { return input(i + kFixedInputCount); }
+ void set_arg(int i, ValueNode* node) {
+ set_input(i + kFixedInputCount, node);
+ }
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
-};
-
-class CallUndefinedReceiver : public ValueNodeT<CallUndefinedReceiver> {
- using Base = ValueNodeT<CallUndefinedReceiver>;
-
- public:
- explicit CallUndefinedReceiver(size_t input_count) : Base(input_count) {}
- static constexpr OpProperties kProperties = OpProperties::Call();
-
- Input& function() { return input(0); }
- const Input& function() const { return input(0); }
- Input& context() { return input(1); }
- const Input& context() const { return input(1); }
- int num_args() const { return input_count() - 2; }
- Input& arg(int i) { return input(i + 2); }
- void set_arg(int i, ValueNode* node) { set_input(i + 2, node); }
-
- void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
- void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
- void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+ private:
+ ConvertReceiverMode receiver_mode_;
};
// Represents either a direct BasicBlock pointer, or an entry in a list of
@@ -1246,13 +1424,13 @@ class ControlNode : public NodeBase {
}
void set_next_post_dominating_hole(ControlNode* node) {
DCHECK_IMPLIES(node != nullptr, node->Is<Jump>() || node->Is<Return>() ||
+ node->Is<Deopt>() ||
node->Is<JumpLoop>());
next_post_dominating_hole_ = node;
}
protected:
- explicit ControlNode(Opcode opcode, size_t input_count)
- : NodeBase(opcode, input_count) {}
+ using NodeBase::NodeBase;
private:
ControlNode* next_post_dominating_hole_ = nullptr;
@@ -1265,12 +1443,11 @@ class UnconditionalControlNode : public ControlNode {
void set_predecessor_id(int id) { predecessor_id_ = id; }
protected:
- explicit UnconditionalControlNode(Opcode opcode, size_t input_count,
+ explicit UnconditionalControlNode(uint32_t bitfield,
BasicBlockRef* target_refs)
- : ControlNode(opcode, input_count), target_(target_refs) {}
- explicit UnconditionalControlNode(Opcode opcode, size_t input_count,
- BasicBlock* target)
- : ControlNode(opcode, input_count), target_(target) {}
+ : ControlNode(bitfield), target_(target_refs) {}
+ explicit UnconditionalControlNode(uint32_t bitfield, BasicBlock* target)
+ : ControlNode(bitfield), target_(target) {}
private:
const BasicBlockRef target_;
@@ -1292,25 +1469,24 @@ class UnconditionalControlNodeT : public UnconditionalControlNode {
}
protected:
- explicit UnconditionalControlNodeT(size_t input_count,
+ explicit UnconditionalControlNodeT(uint32_t bitfield,
BasicBlockRef* target_refs)
- : UnconditionalControlNode(opcode_of<Derived>, kInputCount, target_refs) {
- DCHECK_EQ(input_count, kInputCount);
- USE(input_count);
+ : UnconditionalControlNode(bitfield, target_refs) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
- explicit UnconditionalControlNodeT(size_t input_count, BasicBlock* target)
- : UnconditionalControlNode(opcode_of<Derived>, kInputCount, target) {
- DCHECK_EQ(input_count, kInputCount);
- USE(input_count);
+ explicit UnconditionalControlNodeT(uint32_t bitfield, BasicBlock* target)
+ : UnconditionalControlNode(bitfield, target) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
};
class ConditionalControlNode : public ControlNode {
public:
- ConditionalControlNode(Opcode opcode, size_t input_count,
- BasicBlockRef* if_true_refs,
+ ConditionalControlNode(uint32_t bitfield, BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : ControlNode(opcode, input_count),
+ : ControlNode(bitfield),
if_true_(if_true_refs),
if_false_(if_false_refs) {}
@@ -1337,13 +1513,12 @@ class ConditionalControlNodeT : public ConditionalControlNode {
}
protected:
- explicit ConditionalControlNodeT(size_t input_count,
+ explicit ConditionalControlNodeT(uint32_t bitfield,
BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : ConditionalControlNode(opcode_of<Derived>, kInputCount, if_true_refs,
- if_false_refs) {
- DCHECK_EQ(input_count, kInputCount);
- USE(input_count);
+ : ConditionalControlNode(bitfield, if_true_refs, if_false_refs) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
+ DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
};
@@ -1351,8 +1526,8 @@ class Jump : public UnconditionalControlNodeT<Jump> {
using Base = UnconditionalControlNodeT<Jump>;
public:
- explicit Jump(size_t input_count, BasicBlockRef* target_refs)
- : Base(input_count, target_refs) {}
+ explicit Jump(uint32_t bitfield, BasicBlockRef* target_refs)
+ : Base(bitfield, target_refs) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -1363,11 +1538,11 @@ class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
using Base = UnconditionalControlNodeT<JumpLoop>;
public:
- explicit JumpLoop(size_t input_count, BasicBlock* target)
- : Base(input_count, target) {}
+ explicit JumpLoop(uint32_t bitfield, BasicBlock* target)
+ : Base(bitfield, target) {}
- explicit JumpLoop(size_t input_count, BasicBlockRef* ref)
- : Base(input_count, ref) {}
+ explicit JumpLoop(uint32_t bitfield, BasicBlockRef* ref)
+ : Base(bitfield, ref) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -1376,8 +1551,9 @@ class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
class Return : public ControlNode {
public:
- explicit Return(size_t input_count)
- : ControlNode(opcode_of<Return>, input_count) {}
+ explicit Return(uint32_t bitfield) : ControlNode(bitfield) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Return>);
+ }
Input& value_input() { return input(0); }
@@ -1386,13 +1562,26 @@ class Return : public ControlNode {
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
+class Deopt : public ControlNode {
+ public:
+ explicit Deopt(uint32_t bitfield) : ControlNode(bitfield) {
+ DCHECK_EQ(NodeBase::opcode(), opcode_of<Deopt>);
+ }
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
+
+ void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
+ void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+ void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
class BranchIfTrue : public ConditionalControlNodeT<1, BranchIfTrue> {
using Base = ConditionalControlNodeT<1, BranchIfTrue>;
public:
- explicit BranchIfTrue(size_t input_count, BasicBlockRef* if_true_refs,
+ explicit BranchIfTrue(uint32_t bitfield, BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : Base(input_count, if_true_refs, if_false_refs) {}
+ : Base(bitfield, if_true_refs, if_false_refs) {}
Input& condition_input() { return input(0); }
@@ -1406,10 +1595,9 @@ class BranchIfToBooleanTrue
using Base = ConditionalControlNodeT<1, BranchIfToBooleanTrue>;
public:
- explicit BranchIfToBooleanTrue(size_t input_count,
- BasicBlockRef* if_true_refs,
+ explicit BranchIfToBooleanTrue(uint32_t bitfield, BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : Base(input_count, if_true_refs, if_false_refs) {}
+ : Base(bitfield, if_true_refs, if_false_refs) {}
static constexpr OpProperties kProperties = OpProperties::Call();
@@ -1430,10 +1618,10 @@ class BranchIfCompare
Input& left_input() { return NodeBase::input(kLeftIndex); }
Input& right_input() { return NodeBase::input(kRightIndex); }
- explicit BranchIfCompare(size_t input_count, Operation operation,
+ explicit BranchIfCompare(uint32_t bitfield, Operation operation,
BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
- : Base(input_count, if_true_refs, if_false_refs), operation_(operation) {}
+ : Base(bitfield, if_true_refs, if_false_refs), operation_(operation) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
@@ -1443,17 +1631,6 @@ class BranchIfCompare
Operation operation_;
};
-const OpProperties& NodeBase::properties() const {
- switch (opcode()) {
-#define V(Name) \
- case Opcode::k##Name: \
- return Name::kProperties;
- NODE_BASE_LIST(V)
-#undef V
- }
- UNREACHABLE();
-}
-
} // namespace maglev
} // namespace internal
} // namespace v8
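
Note: the recurring constructor change in this file, `size_t input_count` becoming `uint32_t bitfield`, works because opcode and input count are now packed into a single word on NodeBase (the same `bit_field_` the removed Checkpoint node extended with NextBitField), so the CRTP subclasses only forward the word and re-check the decoded fields. A minimal standalone sketch of that packing follows; the field widths, the BitField helper, and the Opcode values here are illustrative stand-ins, not the actual NodeBase layout.

    #include <cassert>
    #include <cstdint>

    enum class Opcode : uint8_t { kInt32Constant, kCheckedSmiTag, kCall };

    // Simplified stand-in for v8::base::BitField.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    using OpcodeField = BitField<Opcode, 0, 8>;
    using InputCountField = BitField<uint32_t, 8, 17>;

    int main() {
      // The graph builder encodes once when the node is created...
      uint32_t bitfield = OpcodeField::encode(Opcode::kCheckedSmiTag) |
                          InputCountField::encode(1);
      // ...and each CRTP constructor merely re-checks the decoded fields,
      // mirroring DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>) above.
      assert(OpcodeField::decode(bitfield) == Opcode::kCheckedSmiTag);
      assert(InputCountField::decode(bitfield) == 1u);
      return 0;
    }
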
diff --git a/deps/v8/src/maglev/maglev-regalloc.cc b/deps/v8/src/maglev/maglev-regalloc.cc
index 897f2a2d0e..a18fe1547c 100644
--- a/deps/v8/src/maglev/maglev-regalloc.cc
+++ b/deps/v8/src/maglev/maglev-regalloc.cc
@@ -60,9 +60,7 @@ ControlNode* NearestPostDominatingHole(ControlNode* node) {
bool IsLiveAtTarget(ValueNode* node, ControlNode* source, BasicBlock* target) {
DCHECK_NOT_NULL(node);
-
- // TODO(leszeks): We shouldn't have any dead nodes passed into here.
- if (node->is_dead()) return false;
+ DCHECK(!node->is_dead());
// If we're looping, a value can only be live if it was live before the loop.
if (target->control_node()->id() <= source->id()) {
@@ -177,7 +175,8 @@ void StraightForwardRegisterAllocator::ComputePostDominatingHoles(
// If the first branch returns or jumps back, we've found highest
// reachable control-node of the longest branch (the second control
// node).
- if (first->Is<Return>() || first->Is<JumpLoop>()) {
+ if (first->Is<Return>() || first->Is<Deopt>() ||
+ first->Is<JumpLoop>()) {
control->set_next_post_dominating_hole(second);
break;
}
@@ -242,6 +241,9 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
} else if (control->Is<Return>()) {
printing_visitor_->os() << " " << control->id() << ".";
break;
+ } else if (control->Is<Deopt>()) {
+ printing_visitor_->os() << " " << control->id() << "✖️";
+ break;
} else if (control->Is<JumpLoop>()) {
printing_visitor_->os() << " " << control->id() << "↰";
break;
@@ -270,8 +272,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
compiler::AllocatedOperand::cast(allocation));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
- phi, ProcessingState(compilation_unit_, block_it_, nullptr,
- nullptr, nullptr));
+ phi, ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os()
<< "phi (new reg) " << phi->result().operand() << std::endl;
}
@@ -285,8 +286,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
phi->result().SetAllocated(phi->spill_slot());
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
- phi, ProcessingState(compilation_unit_, block_it_, nullptr,
- nullptr, nullptr));
+ phi, ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os()
<< "phi (stack) " << phi->result().operand() << std::endl;
}
@@ -307,45 +307,68 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
}
}
-void StraightForwardRegisterAllocator::UpdateInputUse(uint32_t use,
- const Input& input) {
- ValueNode* node = input.node();
-
- // The value was already cleared through a previous input.
- if (node->is_dead()) return;
+void StraightForwardRegisterAllocator::UpdateUse(
+ ValueNode* node, InputLocation* input_location) {
+ DCHECK(!node->is_dead());
// Update the next use.
- node->set_next_use(input.next_use_id());
+ node->set_next_use(input_location->next_use_id());
+
+ if (!node->is_dead()) return;
// If a value is dead, make sure it's cleared.
- if (node->is_dead()) {
- FreeRegisters(node);
+ FreeRegisters(node);
+}
- // If the stack slot is a local slot, free it so it can be reused.
- if (node->is_spilled()) {
- compiler::AllocatedOperand slot = node->spill_slot();
- if (slot.index() > 0) free_slots_.push_back(slot.index());
- }
- return;
- }
+void StraightForwardRegisterAllocator::UpdateUse(
+ const EagerDeoptInfo& deopt_info) {
+ const CompactInterpreterFrameState* checkpoint_state =
+ deopt_info.state.register_frame;
+ int index = 0;
+ checkpoint_state->ForEachValue(
+ *compilation_unit_, [&](ValueNode* node, interpreter::Register reg) {
+ InputLocation* input = &deopt_info.input_locations[index++];
+ input->InjectAllocated(node->allocation());
+ UpdateUse(node, input);
+ });
+}
+
+void StraightForwardRegisterAllocator::UpdateUse(
+ const LazyDeoptInfo& deopt_info) {
+ const CompactInterpreterFrameState* checkpoint_state =
+ deopt_info.state.register_frame;
+ int index = 0;
+ checkpoint_state->ForEachValue(
+ *compilation_unit_, [&](ValueNode* node, interpreter::Register reg) {
+ // Skip over the result location.
+ if (reg == deopt_info.result_location) return;
+ InputLocation* input = &deopt_info.input_locations[index++];
+ input->InjectAllocated(node->allocation());
+ UpdateUse(node, input);
+ });
}
void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
for (Input& input : *node) AssignInput(input);
AssignTemporaries(node);
- for (Input& input : *node) UpdateInputUse(node->id(), input);
+ if (node->properties().can_eager_deopt()) {
+ UpdateUse(*node->eager_deopt_info());
+ }
+ for (Input& input : *node) UpdateUse(&input);
if (node->properties().is_call()) SpillAndClearRegisters();
- // TODO(verwaest): This isn't a good idea :)
- if (node->properties().can_deopt()) SpillRegisters();
// Allocate node output.
if (node->Is<ValueNode>()) AllocateNodeResult(node->Cast<ValueNode>());
+ // Lazy deopts are semantically after the node, so update them last.
+ if (node->properties().can_lazy_deopt()) {
+ UpdateUse(*node->lazy_deopt_info());
+ }
+
if (FLAG_trace_maglev_regalloc) {
- printing_visitor_->Process(
- node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
- nullptr));
+ printing_visitor_->Process(node,
+ ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
printing_visitor_->os() << "\n";
@@ -477,7 +500,10 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
BasicBlock* block) {
for (Input& input : *node) AssignInput(input);
AssignTemporaries(node);
- for (Input& input : *node) UpdateInputUse(node->id(), input);
+ if (node->properties().can_eager_deopt()) {
+ UpdateUse(*node->eager_deopt_info());
+ }
+ for (Input& input : *node) UpdateUse(&input);
if (node->properties().is_call()) SpillAndClearRegisters();
@@ -490,14 +516,12 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
Input& input = phi->input(block->predecessor_id());
input.InjectAllocated(input.node()->allocation());
}
- for (Phi* phi : *phis) {
- UpdateInputUse(phi->id(), phi->input(block->predecessor_id()));
- }
+ for (Phi* phi : *phis) UpdateUse(&phi->input(block->predecessor_id()));
}
}
// TODO(verwaest): This isn't a good idea :)
- if (node->properties().can_deopt()) SpillRegisters();
+ if (node->properties().can_eager_deopt()) SpillRegisters();
// Merge register values. Values only flowing into phis and not being
// independently live will be killed as part of the merge.
@@ -513,9 +537,8 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
}
if (FLAG_trace_maglev_regalloc) {
- printing_visitor_->Process(
- node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
- nullptr));
+ printing_visitor_->Process(node,
+ ProcessingState(compilation_unit_, block_it_));
}
}
@@ -528,8 +551,7 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
phi->result().SetAllocated(ForceAllocate(reg, phi));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
- phi, ProcessingState(compilation_unit_, block_it_, nullptr,
- nullptr, nullptr));
+ phi, ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os()
<< "phi (reuse) " << input.operand() << std::endl;
}
@@ -629,13 +651,8 @@ void StraightForwardRegisterAllocator::SpillAndClearRegisters() {
void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
DCHECK(!node->is_spilled());
- uint32_t free_slot;
- if (free_slots_.empty()) {
- free_slot = top_of_stack_++;
- } else {
- free_slot = free_slots_.back();
- free_slots_.pop_back();
- }
+ uint32_t free_slot = top_of_stack_++;
+ compilation_unit_->push_stack_value_repr(node->value_representation());
node->Spill(compiler::AllocatedOperand(compiler::AllocatedOperand::STACK_SLOT,
MachineRepresentation::kTagged,
free_slot));
@@ -659,6 +676,7 @@ void StraightForwardRegisterAllocator::FreeSomeRegister() {
}
}
DCHECK(best.is_valid());
+ DropRegisterValue(best);
FreeRegister(best);
}
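
Note: the UpdateInputUse to UpdateUse rewrite above centres on one contract: each use site records the id of the value's next use, and advancing past the last use is what kills the value and releases its register. A small self-contained model of that contract, with field names simplified from the real ValueNode/InputLocation:

    #include <cstdint>
    #include <cstdio>

    struct ValueNode {
      uint32_t next_use;  // 0 means no further uses, i.e. dead.
      int reg;            // -1 means not currently in a register.
      bool is_dead() const { return next_use == 0; }
    };

    struct InputLocation {
      uint32_t next_use_id;  // Use id following this one; 0 at the last use.
    };

    void FreeRegisters(ValueNode* node) {
      if (node->reg >= 0) {
        std::printf("freeing r%d\n", node->reg);
        node->reg = -1;
      }
    }

    // Mirrors UpdateUse(ValueNode*, InputLocation*): advance the use chain,
    // then free the value's storage if that was its last use.
    void UpdateUse(ValueNode* node, InputLocation* input) {
      node->next_use = input->next_use_id;
      if (!node->is_dead()) return;
      FreeRegisters(node);
    }

    int main() {
      ValueNode v{/*next_use=*/7, /*reg=*/3};
      InputLocation penultimate{/*next_use_id=*/9};
      InputLocation last{/*next_use_id=*/0};
      UpdateUse(&v, &penultimate);  // still live, register kept
      UpdateUse(&v, &last);         // last use: prints "freeing r3"
      return 0;
    }

The ordering in AllocateNode above follows the same contract: eager-deopt uses are walked before the node's own inputs (an eager deopt happens before the node executes), and lazy-deopt uses are walked after the result is allocated, since they are semantically after the node.
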
diff --git a/deps/v8/src/maglev/maglev-regalloc.h b/deps/v8/src/maglev/maglev-regalloc.h
index c198d2f8fc..5bc435f24e 100644
--- a/deps/v8/src/maglev/maglev-regalloc.h
+++ b/deps/v8/src/maglev/maglev-regalloc.h
@@ -33,7 +33,6 @@ class StraightForwardRegisterAllocator {
int top_of_stack_ = 0;
RegList free_registers_ = kAllocatableGeneralRegisters;
- std::vector<uint32_t> free_slots_;
RegList used_registers() const {
// Only allocatable registers should be free.
@@ -46,7 +45,10 @@ class StraightForwardRegisterAllocator {
void PrintLiveRegs() const;
- void UpdateInputUse(uint32_t use, const Input& input);
+ void UpdateUse(Input* input) { return UpdateUse(input->node(), input); }
+ void UpdateUse(ValueNode* node, InputLocation* input_location);
+ void UpdateUse(const EagerDeoptInfo& deopt_info);
+ void UpdateUse(const LazyDeoptInfo& deopt_info);
void AllocateControlNode(ControlNode* node, BasicBlock* block);
void AllocateNode(Node* node);
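
Note: with free_slots_ gone, spill slots are handed out monotonically and never recycled, and each slot's value representation is recorded on the compilation unit via push_stack_value_repr so later phases know what lives in each slot. A sketch of that simplified policy; the type names and the ValueRepresentation values are illustrative, only the allocate-then-record shape is taken from the diff.

    #include <cstdint>
    #include <vector>

    enum class ValueRepresentation { kTagged, kInt32 };

    // Illustrative stand-in for MaglevCompilationUnit's per-slot bookkeeping.
    struct CompilationUnitSketch {
      std::vector<ValueRepresentation> stack_value_reprs;
      void push_stack_value_repr(ValueRepresentation r) {
        stack_value_reprs.push_back(r);
      }
    };

    struct SpillSlotAllocator {
      uint32_t top_of_stack = 0;
      CompilationUnitSketch* unit;

      uint32_t AllocateSpillSlot(ValueRepresentation repr) {
        uint32_t slot = top_of_stack++;     // monotonic: no free list to reuse
        unit->push_stack_value_repr(repr);  // one representation per slot
        return slot;
      }
    };

    int main() {
      CompilationUnitSketch unit;
      SpillSlotAllocator alloc{0, &unit};
      alloc.AllocateSpillSlot(ValueRepresentation::kTagged);  // slot 0
      alloc.AllocateSpillSlot(ValueRepresentation::kInt32);   // slot 1
      return 0;
    }
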
diff --git a/deps/v8/src/maglev/maglev-vreg-allocator.h b/deps/v8/src/maglev/maglev-vreg-allocator.h
index 19d5517f70..269f897a11 100644
--- a/deps/v8/src/maglev/maglev-vreg-allocator.h
+++ b/deps/v8/src/maglev/maglev-vreg-allocator.h
@@ -26,8 +26,6 @@ class MaglevVregAllocationState {
class MaglevVregAllocator {
public:
- static constexpr bool kNeedsCheckpointStates = true;
-
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
for (BasicBlock* block : *graph) {
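
Note: kNeedsCheckpointStates was a compile-time flag that graph processors advertised so the generic graph-processing machinery could opt into checkpoint bookkeeping; with deopt state now attached directly to nodes, the flag disappears. For reference, the kind of trait detection such an opt-in typically relies on; this detection code is a hypothetical reconstruction, not the contents of maglev-graph-processor.h:

    #include <type_traits>

    template <typename Processor, typename = void>
    struct NeedsCheckpointStates : std::false_type {};

    template <typename Processor>
    struct NeedsCheckpointStates<
        Processor, std::void_t<decltype(Processor::kNeedsCheckpointStates)>>
        : std::bool_constant<Processor::kNeedsCheckpointStates> {};

    struct OldStyleProcessor {
      static constexpr bool kNeedsCheckpointStates = true;
    };
    struct NewStyleProcessor {};  // No flag needed after this change.

    static_assert(NeedsCheckpointStates<OldStyleProcessor>::value);
    static_assert(!NeedsCheckpointStates<NewStyleProcessor>::value);
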
diff --git a/deps/v8/src/maglev/maglev.cc b/deps/v8/src/maglev/maglev.cc
index 6397d02e60..0240fb3261 100644
--- a/deps/v8/src/maglev/maglev.cc
+++ b/deps/v8/src/maglev/maglev.cc
@@ -16,7 +16,7 @@ MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
DCHECK(FLAG_maglev);
auto info = maglev::MaglevCompilationInfo::New(isolate, function);
maglev::MaglevCompilationUnit* const unit = info->toplevel_compilation_unit();
- maglev::MaglevCompiler::Compile(unit);
+ maglev::MaglevCompiler::Compile(isolate->main_thread_local_isolate(), unit);
return maglev::MaglevCompiler::GenerateCode(unit);
}