Diffstat (limited to 'deps/v8/src/compiler/turboshaft/graph.h')
-rw-r--r--  deps/v8/src/compiler/turboshaft/graph.h  325
1 file changed, 275 insertions, 50 deletions
diff --git a/deps/v8/src/compiler/turboshaft/graph.h b/deps/v8/src/compiler/turboshaft/graph.h
index 4704646324..8f77f08ebe 100644
--- a/deps/v8/src/compiler/turboshaft/graph.h
+++ b/deps/v8/src/compiler/turboshaft/graph.h
@@ -9,19 +9,22 @@
#include <iterator>
#include <limits>
#include <memory>
+#include <tuple>
#include <type_traits>
#include "src/base/iterator.h"
+#include "src/base/logging.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
#include "src/codegen/source-position.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/sidetable.h"
+#include "src/compiler/turboshaft/types.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
-template <template <class> class... Reducers>
+template <class Reducers>
class Assembler;
// `OperationBuffer` is a growable, Zone-allocated buffer to store Turboshaft
@@ -68,6 +71,7 @@ class OperationBuffer {
};
explicit OperationBuffer(Zone* zone, size_t initial_capacity) : zone_(zone) {
+ DCHECK_NE(initial_capacity, 0);
begin_ = end_ = zone_->NewArray<OperationStorageSlot>(initial_capacity);
operation_sizes_ =
zone_->NewArray<uint16_t>((initial_capacity + 1) / kSlotsPerId);
@@ -274,28 +278,18 @@ class Block : public RandomAccessStackDominatorNode<Block> {
bool IsBranchTarget() const { return kind_ == Kind::kBranchTarget; }
bool IsHandler() const { return false; }
bool IsSwitchCase() const { return false; }
+
Kind kind() const { return kind_; }
void SetKind(Kind kind) { kind_ = kind; }
BlockIndex index() const { return index_; }
- bool IsDeferred() const { return deferred_; }
- void SetDeferred(bool deferred) { deferred_ = deferred; }
-
bool Contains(OpIndex op_idx) const {
return begin_ <= op_idx && op_idx < end_;
}
bool IsBound() const { return index_ != BlockIndex::Invalid(); }
- void AddPredecessor(Block* predecessor) {
- DCHECK(!IsBound() ||
- (Predecessors().size() == 1 && kind_ == Kind::kLoopHeader));
- DCHECK_EQ(predecessor->neighboring_predecessor_, nullptr);
- predecessor->neighboring_predecessor_ = last_predecessor_;
- last_predecessor_ = predecessor;
- }
-
base::SmallVector<Block*, 8> Predecessors() const {
base::SmallVector<Block*, 8> result;
for (Block* pred = last_predecessor_; pred != nullptr;
@@ -306,6 +300,9 @@ class Block : public RandomAccessStackDominatorNode<Block> {
return result;
}
+ // TODO(dmercadier): we should store predecessor count in the Blocks directly
+ // (or in the Graph, or in the Assembler), to avoid this O(n) PredecessorCount
+ // method.
int PredecessorCount() const {
int count = 0;
for (Block* pred = last_predecessor_; pred != nullptr;
@@ -315,18 +312,79 @@ class Block : public RandomAccessStackDominatorNode<Block> {
return count;
}
+ // Returns the index of {target} in the predecessors of the current Block.
+ // If {target} is not a direct predecessor, returns -1.
+ int GetPredecessorIndex(const Block* target) const {
+ int pred_count = 0;
+ int pred_reverse_index = -1;
+ for (Block* pred = last_predecessor_; pred != nullptr;
+ pred = pred->neighboring_predecessor_) {
+ if (pred == target) {
+ DCHECK_EQ(pred_reverse_index, -1);
+ pred_reverse_index = pred_count;
+ }
+ pred_count++;
+ }
+ if (pred_reverse_index == -1) {
+ return -1;
+ }
+ return pred_count - pred_reverse_index - 1;
+ }
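Because predecessors are kept in a singly-linked chain that starts at the most recently added predecessor, the loop above finds {target}'s position counting from the end and then converts it into a forward index. A minimal standalone sketch of that conversion, using a plain vector in place of the linked chain (the helper and values below are illustrative, not part of this header):

#include <cassert>
#include <vector>

// Hypothetical stand-in for the reversed predecessor chain: preds_reversed[0]
// corresponds to last_predecessor_, i.e. the *last* predecessor in block order.
int GetPredecessorIndex(const std::vector<int>& preds_reversed, int target) {
  int pred_count = 0;
  int pred_reverse_index = -1;
  for (int pred : preds_reversed) {
    if (pred == target) pred_reverse_index = pred_count;
    pred_count++;
  }
  if (pred_reverse_index == -1) return -1;
  // Convert "position from the end" into "position from the start".
  return pred_count - pred_reverse_index - 1;
}

int main() {
  // Block order of predecessors: {10, 20, 30}; stored reversed as {30, 20, 10}.
  std::vector<int> preds_reversed = {30, 20, 10};
  assert(GetPredecessorIndex(preds_reversed, 10) == 0);
  assert(GetPredecessorIndex(preds_reversed, 30) == 2);
  assert(GetPredecessorIndex(preds_reversed, 99) == -1);
}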
+
+ // HasExactlyNPredecessors(n) returns the same result as
+ // `PredecessorCount() == n`, but stops early and iterates at most the first
+ // {n} predecessors.
+ bool HasExactlyNPredecessors(unsigned int n) const {
+ Block* current_pred = last_predecessor_;
+ while (current_pred != nullptr && n != 0) {
+ current_pred = current_pred->neighboring_predecessor_;
+ n--;
+ }
+ return n == 0 && current_pred == nullptr;
+ }
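The early exit works because walking at most {n} links is enough to distinguish "exactly n", "fewer than n", and "more than n" predecessors. A small self-contained sketch of the same loop over a hypothetical linked chain:

#include <cassert>

// Stand-in for the singly-linked predecessor chain.
struct Node {
  Node* next = nullptr;
};

// Mirrors Block::HasExactlyNPredecessors: walk at most n links, then require
// that exactly n links were consumed and the chain ended.
bool HasExactlyN(const Node* head, unsigned n) {
  const Node* cur = head;
  while (cur != nullptr && n != 0) {
    cur = cur->next;
    n--;
  }
  return n == 0 && cur == nullptr;
}

int main() {
  Node a, b, c;  // chain of length 3: a -> b -> c
  a.next = &b;
  b.next = &c;
  assert(HasExactlyN(&a, 3));
  assert(!HasExactlyN(&a, 2));  // stops after two links, chain not finished
  assert(!HasExactlyN(&a, 4));  // chain ends before n reaches zero
}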
+
Block* LastPredecessor() const { return last_predecessor_; }
Block* NeighboringPredecessor() const { return neighboring_predecessor_; }
bool HasPredecessors() const { return last_predecessor_ != nullptr; }
+ void ResetLastPredecessor() { last_predecessor_ = nullptr; }
- // The block from the previous graph which produced the current block. This is
- // used for translating phi nodes from the previous graph.
+ void SetMappingToNextGraph(Block* next_graph_block) {
+ DCHECK_NULL(next_graph_mapping_);
+ DCHECK_NOT_NULL(next_graph_block);
+ next_graph_mapping_ = next_graph_block;
+ next_graph_block->SetOrigin(this);
+ }
+ Block* MapToNextGraph() const {
+ DCHECK_NOT_NULL(next_graph_mapping_);
+ return next_graph_mapping_;
+ }
+ // The block from the previous graph which produced the current block. This
+ // has to be updated to be the last block that contributed operations to the
+ // current block to ensure that phi nodes are created correctly.
void SetOrigin(const Block* origin) {
- DCHECK_NULL(origin_);
- DCHECK_EQ(origin->graph_generation_ + 1, graph_generation_);
+ DCHECK_IMPLIES(origin != nullptr,
+ origin->graph_generation_ + 1 == graph_generation_);
origin_ = origin;
}
- const Block* Origin() const { return origin_; }
+ // The block from the input graph that is equivalent to this block when used
+ // as a predecessor. It is only available for bound blocks and does *not*
+ // refer to an equivalent block as a branch destination.
+ const Block* OriginForBlockEnd() const {
+ DCHECK(IsBound());
+ return origin_;
+ }
+ // The block from the input graph that corresponds to the current block as a
+ // branch destination. Such a block might not exist; this function uses a
+ // trick to compute it in almost all cases, but might rarely fail and return
+ // `nullptr` instead.
+ const Block* OriginForBlockStart() const {
+ // Check that `origin_` is still valid as a block start and was not changed
+ // to a semantically different block when inlining blocks.
+ if (origin_ && origin_->MapToNextGraph() == this) {
+ return origin_;
+ }
+ return nullptr;
+ }
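The block-start origin above is recovered via a round-trip check: an input block only counts if its own forward mapping (set via SetMappingToNextGraph) still points back at this output block. A minimal sketch of that invariant with simplified stand-in structs (FakeBlock is illustrative, not the real Block class):

#include <cassert>

struct FakeBlock {
  FakeBlock* next_graph_mapping = nullptr;  // set when translating to the new graph
  const FakeBlock* origin = nullptr;        // block in the previous graph
};

// Mirrors OriginForBlockStart: the origin only counts as a block-start origin
// if its forward mapping still points at this very block.
const FakeBlock* OriginForBlockStart(const FakeBlock& b) {
  if (b.origin && b.origin->next_graph_mapping == &b) return b.origin;
  return nullptr;
}

int main() {
  FakeBlock input, output, inlined_into;
  // Normal translation: input maps to output, output's origin is input.
  input.next_graph_mapping = &output;
  output.origin = &input;
  assert(OriginForBlockStart(output) == &input);

  // After inlining, the origin's mapping may have been redirected to another
  // output block; the round-trip check then fails and nullptr is returned.
  input.next_graph_mapping = &inlined_into;
  assert(OriginForBlockStart(output) == nullptr);
}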
OpIndex begin() const {
DCHECK(begin_.valid());
@@ -337,9 +395,27 @@ class Block : public RandomAccessStackDominatorNode<Block> {
return end_;
}
+ // Both accessors expect a non-empty, bound block (valid begin and end
+ // indices); they return references, never null.
+ const Operation& FirstOperation(const Graph& graph) const;
+ const Operation& LastOperation(const Graph& graph) const;
+
+ bool EndsWithBranchingOp(const Graph& graph) const {
+ switch (LastOperation(graph).opcode) {
+ case Opcode::kBranch:
+ case Opcode::kSwitch:
+ case Opcode::kCallAndCatchException:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool HasPhis(const Graph& graph) const;
+
// Computes the dominators of this block, assuming that the dominators of
- // its predecessors are already computed.
- void ComputeDominator();
+ // its predecessors are already computed. Returns the depth of the current
+ // block in the dominator tree.
+ uint32_t ComputeDominator();
void PrintDominatorTree(
std::vector<const char*> tree_symbols = std::vector<const char*>(),
@@ -347,22 +423,45 @@ class Block : public RandomAccessStackDominatorNode<Block> {
explicit Block(Kind kind) : kind_(kind) {}
+ uint32_t& custom_data() { return custom_data_; }
+ const uint32_t& custom_data() const { return custom_data_; }
+
private:
+ // AddPredecessor should never be called directly except from Assembler's
+ // AddPredecessor and SplitEdge methods, which take care of maintaining
+ // split-edge form.
+ void AddPredecessor(Block* predecessor) {
+ DCHECK(!IsBound() ||
+ (Predecessors().size() == 1 && kind_ == Kind::kLoopHeader));
+ DCHECK_EQ(predecessor->neighboring_predecessor_, nullptr);
+ predecessor->neighboring_predecessor_ = last_predecessor_;
+ last_predecessor_ = predecessor;
+ }
+
friend class Graph;
+ template <class Reducers>
+ friend class Assembler;
Kind kind_;
- bool deferred_ = false;
OpIndex begin_ = OpIndex::Invalid();
OpIndex end_ = OpIndex::Invalid();
BlockIndex index_ = BlockIndex::Invalid();
Block* last_predecessor_ = nullptr;
Block* neighboring_predecessor_ = nullptr;
const Block* origin_ = nullptr;
+ Block* next_graph_mapping_ = nullptr;
+ // The {custom_data_} field can be used by algorithms to temporarily store
+ // block-specific data. This field is not preserved when constructing a new
+ // output graph and algorithms cannot rely on this field being properly reset
+ // after previous uses.
+ uint32_t custom_data_ = 0;
#ifdef DEBUG
size_t graph_generation_ = 0;
#endif
};
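As noted in the comment on custom_data_, the field is a per-block scratch slot that an algorithm may reuse, provided it initializes the value itself. A hypothetical sketch of a pass using it as a visited flag (ScratchBlock and MarkReachable are illustrative stand-ins, not part of this header):

#include <cstdint>
#include <vector>

// Simplified stand-in for Block, exposing only the scratch field and explicit
// successor edges; the real class reaches successors via its terminator op.
struct ScratchBlock {
  uint32_t custom_data = 0;  // must be (re)initialized by each pass
  std::vector<ScratchBlock*> successors;
};

// Marks every block reachable from `start`, using custom_data as a visited
// flag. A real pass would reset the field first, since it is not guaranteed
// to be zero after previous uses.
void MarkReachable(ScratchBlock* start) {
  std::vector<ScratchBlock*> worklist = {start};
  while (!worklist.empty()) {
    ScratchBlock* b = worklist.back();
    worklist.pop_back();
    if (b->custom_data == 1) continue;  // already visited
    b->custom_data = 1;
    for (ScratchBlock* succ : b->successors) worklist.push_back(succ);
  }
}

int main() {
  ScratchBlock a, b, c;
  a.successors = {&b};
  b.successors = {&c, &a};  // cycle back to a
  MarkReachable(&a);
  return (a.custom_data && b.custom_data && c.custom_data) ? 0 : 1;
}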
+std::ostream& operator<<(std::ostream& os, const Block* b);
+
class Graph {
public:
// A big initial capacity prevents many growing steps. It also makes sense
@@ -373,7 +472,14 @@ class Graph {
all_blocks_(graph_zone),
graph_zone_(graph_zone),
source_positions_(graph_zone),
- operation_origins_(graph_zone) {}
+ operation_origins_(graph_zone),
+ operation_types_(graph_zone)
+#ifdef DEBUG
+ ,
+ block_type_refinement_(graph_zone)
+#endif
+ {
+ }
// Reset the graph to recycle its memory.
void Reset() {
@@ -381,7 +487,12 @@ class Graph {
bound_blocks_.clear();
source_positions_.Reset();
operation_origins_.Reset();
+ operation_types_.Reset();
next_block_ = 0;
+ dominator_tree_depth_ = 0;
+#ifdef DEBUG
+ block_type_refinement_.Reset();
+#endif
}
V8_INLINE const Operation& Get(OpIndex i) const {
@@ -403,6 +514,8 @@ class Graph {
return *ptr;
}
+ void MarkAsUnused(OpIndex i) { Get(i).saturated_use_count = 0; }
+
const Block& StartBlock() const { return Get(BlockIndex(0)); }
Block& Get(BlockIndex i) {
@@ -415,6 +528,19 @@ class Graph {
}
OpIndex Index(const Operation& op) const { return operations_.Index(op); }
+ BlockIndex BlockOf(OpIndex index) const {
+ auto it = std::upper_bound(
+ bound_blocks_.begin(), bound_blocks_.end(), index,
+ [](OpIndex value, const Block* b) { return value < b->begin_; });
+ DCHECK_NE(it, bound_blocks_.begin());
+ --it;
+ return (*it)->index();
+ }
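BlockOf relies on bound blocks being appended in operation order, so their begin_ indices are sorted: std::upper_bound finds the first block that starts strictly after the index, and the block just before it must contain the index. A small sketch of the same search over plain integers (hypothetical start offsets, not real OpIndex values):

#include <algorithm>
#include <cassert>
#include <vector>

// Hypothetical block start offsets in operation order: block 0 starts at 0,
// block 1 at 4, block 2 at 9.
int BlockOf(const std::vector<int>& block_begins, int op_index) {
  auto it = std::upper_bound(block_begins.begin(), block_begins.end(), op_index);
  assert(it != block_begins.begin());  // op_index must not precede block 0
  --it;  // the last block starting at or before op_index contains it
  return static_cast<int>(it - block_begins.begin());
}

int main() {
  std::vector<int> begins = {0, 4, 9};
  assert(BlockOf(begins, 0) == 0);
  assert(BlockOf(begins, 4) == 1);
  assert(BlockOf(begins, 8) == 1);
  assert(BlockOf(begins, 12) == 2);
}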
+
+ OpIndex NextIndex(const OpIndex idx) const { return operations_.Next(idx); }
+ OpIndex PreviousIndex(const OpIndex idx) const {
+ return operations_.Previous(idx);
+ }
OperationStorageSlot* Allocate(size_t slot_count) {
return operations_.Allocate(slot_count);
@@ -432,6 +558,16 @@ class Graph {
#endif // DEBUG
Op& op = Op::New(this, args...);
IncrementInputUses(op);
+
+ if (op.Properties().is_required_when_unused) {
+ // Once the graph is built, an operation with a `saturated_use_count` of 0
+ // is guaranteed to be unused and can be removed. Thus, to avoid removing
+ // operations that never have uses (such as Goto or Branch), we set the
+ // `saturated_use_count` of Operations that are `required_when_unused`
+ // to 1.
+ op.saturated_use_count = 1;
+ }
+
DCHECK_EQ(result, Index(op));
#ifdef DEBUG
for (OpIndex input : op.inputs()) {
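The is_required_when_unused handling in the hunk above seeds saturated_use_count with 1 so that later phases can treat a count of zero as "safe to remove" without accidentally dropping control or side-effecting operations that never acquire value uses. A minimal sketch of how a cleanup pass might rely on that invariant (the types below are illustrative stand-ins, not the real Turboshaft API):

#include <cassert>
#include <cstdint>

// Illustrative stand-in for an operation record (not the real Operation type).
struct FakeOp {
  uint8_t saturated_use_count = 0;
  bool required_when_unused = false;
};

// Mirrors the seeding above: ops that must survive without uses (e.g. Goto,
// Branch) get a use count of 1 up front.
void SeedUseCount(FakeOp& op) {
  if (op.required_when_unused) op.saturated_use_count = 1;
}

// A later sweep can then treat `saturated_use_count == 0` as "removable".
bool IsRemovable(const FakeOp& op) { return op.saturated_use_count == 0; }

int main() {
  FakeOp add_op;   // a pure value op that ends up with no uses
  FakeOp goto_op;  // control op that never has value uses
  goto_op.required_when_unused = true;
  SeedUseCount(add_op);
  SeedUseCount(goto_op);
  assert(IsRemovable(add_op));    // genuinely dead, may be dropped
  assert(!IsRemovable(goto_op));  // kept alive by the seeded count
}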
@@ -458,43 +594,29 @@ class Graph {
IncrementInputUses(*new_op);
}
- V8_INLINE Block* NewBlock(Block::Kind kind) {
- if (V8_UNLIKELY(next_block_ == all_blocks_.size())) {
- constexpr size_t new_block_count = 64;
- base::Vector<Block> blocks =
- graph_zone_->NewVector<Block>(new_block_count, Block(kind));
- for (size_t i = 0; i < new_block_count; ++i) {
- all_blocks_.push_back(&blocks[i]);
- }
- }
- Block* result = all_blocks_[next_block_++];
- *result = Block(kind);
-#ifdef DEBUG
- result->graph_generation_ = generation_;
-#endif
- return result;
+ V8_INLINE Block* NewLoopHeader() {
+ return NewBlock(Block::Kind::kLoopHeader);
+ }
+ V8_INLINE Block* NewBlock() { return NewBlock(Block::Kind::kMerge); }
+ V8_INLINE Block* NewMappedBlock(Block* origin) {
+ Block* new_block = NewBlock(origin->IsLoop() ? Block::Kind::kLoopHeader
+ : Block::Kind::kMerge);
+ origin->SetMappingToNextGraph(new_block);
+ return new_block;
}
V8_INLINE bool Add(Block* block) {
DCHECK_EQ(block->graph_generation_, generation_);
if (!bound_blocks_.empty() && !block->HasPredecessors()) return false;
- if (!block->IsDeferred()) {
- bool deferred = true;
- for (Block* pred = block->last_predecessor_; pred != nullptr;
- pred = pred->neighboring_predecessor_) {
- if (!pred->IsDeferred()) {
- deferred = false;
- break;
- }
- }
- block->SetDeferred(deferred);
- }
+
DCHECK(!block->begin_.valid());
block->begin_ = next_operation_index();
DCHECK_EQ(block->index_, BlockIndex::Invalid());
- block->index_ = BlockIndex(static_cast<uint32_t>(bound_blocks_.size()));
+ block->index_ = next_block_index();
bound_blocks_.push_back(block);
- block->ComputeDominator();
+ uint32_t depth = block->ComputeDominator();
+ dominator_tree_depth_ = std::max<uint32_t>(dominator_tree_depth_, depth);
+
return true;
}
@@ -517,6 +639,9 @@ class Graph {
}
OpIndex next_operation_index() const { return operations_.EndIndex(); }
+ BlockIndex next_block_index() const {
+ return BlockIndex(static_cast<uint32_t>(bound_blocks_.size()));
+ }
Zone* graph_zone() const { return graph_zone_; }
uint32_t block_count() const {
@@ -648,6 +773,11 @@ class Graph {
bound_blocks_.size())};
}
+ bool IsLoopBackedge(const GotoOp& op) const {
+ DCHECK(op.destination->IsBound());
+ return op.destination->begin() <= Index(op);
+ }
+
bool IsValid(OpIndex i) const { return i < next_operation_index(); }
const GrowingSidetable<SourcePosition>& source_positions() const {
@@ -662,6 +792,24 @@ class Graph {
}
GrowingSidetable<OpIndex>& operation_origins() { return operation_origins_; }
+ uint32_t DominatorTreeDepth() const { return dominator_tree_depth_; }
+ const GrowingSidetable<Type>& operation_types() const {
+ return operation_types_;
+ }
+ GrowingSidetable<Type>& operation_types() { return operation_types_; }
+#ifdef DEBUG
+ // Store refined types per block here for --trace-turbo printing.
+ // TODO(nicohartmann@): Remove this once we have a proper way to print
+ // type information inside the reducers.
+ using TypeRefinements = std::vector<std::pair<OpIndex, Type>>;
+ const GrowingBlockSidetable<TypeRefinements>& block_type_refinement() const {
+ return block_type_refinement_;
+ }
+ GrowingBlockSidetable<TypeRefinements>& block_type_refinement() {
+ return block_type_refinement_;
+ }
+#endif // DEBUG
+
Graph& GetOrCreateCompanion() {
if (!companion_) {
companion_ = std::make_unique<Graph>(graph_zone_, operations_.size());
@@ -683,13 +831,19 @@ class Graph {
std::swap(graph_zone_, companion.graph_zone_);
std::swap(source_positions_, companion.source_positions_);
std::swap(operation_origins_, companion.operation_origins_);
+ std::swap(operation_types_, companion.operation_types_);
#ifdef DEBUG
+ std::swap(block_type_refinement_, companion.block_type_refinement_);
// Update generation index.
DCHECK_EQ(generation_ + 1, companion.generation_);
generation_ = companion.generation_++;
#endif // DEBUG
}
+#ifdef DEBUG
+ size_t generation() const { return generation_; }
+#endif // DEBUG
+
private:
bool InputsValid(const Operation& op) const {
for (OpIndex i : op.inputs()) {
@@ -724,6 +878,23 @@ class Graph {
}
}
+ V8_INLINE Block* NewBlock(Block::Kind kind) {
+ if (V8_UNLIKELY(next_block_ == all_blocks_.size())) {
+ constexpr size_t new_block_count = 64;
+ base::Vector<Block> blocks =
+ graph_zone_->NewVector<Block>(new_block_count, Block(kind));
+ for (size_t i = 0; i < new_block_count; ++i) {
+ all_blocks_.push_back(&blocks[i]);
+ }
+ }
+ Block* result = all_blocks_[next_block_++];
+ *result = Block(kind);
+#ifdef DEBUG
+ result->graph_generation_ = generation_;
+#endif
+ return result;
+ }
+
OperationBuffer operations_;
ZoneVector<Block*> bound_blocks_;
ZoneVector<Block*> all_blocks_;
@@ -731,6 +902,11 @@ class Graph {
Zone* graph_zone_;
GrowingSidetable<SourcePosition> source_positions_;
GrowingSidetable<OpIndex> operation_origins_;
+ uint32_t dominator_tree_depth_ = 0;
+ GrowingSidetable<Type> operation_types_;
+#ifdef DEBUG
+ GrowingBlockSidetable<TypeRefinements> block_type_refinement_;
+#endif
std::unique_ptr<Graph> companion_ = {};
#ifdef DEBUG
@@ -743,14 +919,62 @@ V8_INLINE OperationStorageSlot* AllocateOpStorage(Graph* graph,
return graph->Allocate(slot_count);
}
+V8_INLINE const Operation& Get(const Graph& graph, OpIndex index) {
+ return graph.Get(index);
+}
+
+V8_INLINE const Operation& Block::FirstOperation(const Graph& graph) const {
+ DCHECK_EQ(graph_generation_, graph.generation());
+ DCHECK(begin_.valid());
+ DCHECK(end_.valid());
+ return graph.Get(begin_);
+}
+
+V8_INLINE const Operation& Block::LastOperation(const Graph& graph) const {
+ DCHECK_EQ(graph_generation_, graph.generation());
+ return graph.Get(graph.PreviousIndex(end()));
+}
+
+V8_INLINE bool Block::HasPhis(const Graph& graph) const {
+ DCHECK_EQ(graph_generation_, graph.generation());
+#ifdef DEBUG
+ // Verify that no Phi appears after the first non-Phi/non-FrameState
+ // operation, i.e. all Phis (possibly interleaved with FrameStateOps) come
+ // consecutively at the beginning of the block.
+ bool starts_with_phi = false;
+ bool finished_phis = false;
+ for (const auto& op : graph.operations(*this)) {
+ if (op.Is<PhiOp>()) {
+ DCHECK(!finished_phis);
+ starts_with_phi = true;
+ }
+ if (!op.Is<PhiOp>() && !op.Is<FrameStateOp>()) {
+ finished_phis = true;
+ }
+ }
+ return starts_with_phi;
+#else // DEBUG
+ for (const auto& op : graph.operations(*this)) {
+ if (op.Is<PhiOp>()) {
+ return true;
+ } else if (op.Is<FrameStateOp>()) {
+ continue;
+ } else {
+ return false;
+ }
+ }
+ return false;
+#endif // DEBUG
+}
+
struct PrintAsBlockHeader {
const Block& block;
+ BlockIndex block_id = block.index();
};
std::ostream& operator<<(std::ostream& os, PrintAsBlockHeader block);
std::ostream& operator<<(std::ostream& os, const Graph& graph);
std::ostream& operator<<(std::ostream& os, const Block::Kind& kind);
-inline void Block::ComputeDominator() {
+inline uint32_t Block::ComputeDominator() {
if (V8_UNLIKELY(LastPredecessor() == nullptr)) {
// If the block has no predecessors, then it's the start block. We create a
// jmp_ edge to itself, so that the SetDominator algorithm does not need a
@@ -778,6 +1002,7 @@ inline void Block::ComputeDominator() {
DCHECK_NE(jmp_, nullptr);
DCHECK_IMPLIES(nxt_ == nullptr, LastPredecessor() == nullptr);
DCHECK_IMPLIES(len_ == 0, LastPredecessor() == nullptr);
+ return Depth();
}
template <class Derived>