// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
#include <map>
#include "src/codegen/cpu-features.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-scheduler.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/wasm/simd-shuffle.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
class TickCounter;
namespace compiler {
// Forward declarations.
class BasicBlock;
struct CallBuffer; // TODO(bmeurer): Remove this.
class Linkage;
class OperandGenerator;
class SwitchInfo;
class StateObjectDeduplicator;
// The flags continuation is a way to combine a branch or a materialization
// of a boolean value with an instruction that sets the flags register.
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
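//
// An illustrative sketch (hypothetical node and opcode names, not code from a
// particular backend): to fuse
//   cmp = Word32Equal(a, b); Branch(cmp, if_true, if_false)
// into one flag-setting compare-and-branch, a backend might write:
//   FlagsContinuation cont =
//       FlagsContinuation::ForBranch(kEqual, if_true, if_false);
//   selector->EmitWithContinuation(kTargetCmp32, g.UseRegister(a),
//                                  g.UseRegister(b), &cont);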
class FlagsContinuation final {
public:
FlagsContinuation() : mode_(kFlags_none) {}
// Creates a new flags continuation from the given condition and true/false
// blocks.
static FlagsContinuation ForBranch(FlagsCondition condition,
BasicBlock* true_block,
BasicBlock* false_block) {
return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
}
static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
BasicBlock* true_block,
BasicBlock* false_block) {
return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
false_block);
}
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(FlagsCondition condition,
DeoptimizeKind kind,
DeoptimizeReason reason,
FeedbackSource const& feedback,
Node* frame_state) {
return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
feedback, frame_state);
}
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimizeAndPoison(
FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, Node* frame_state) {
return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
reason, feedback, frame_state);
}
// Creates a new flags continuation for a boolean value.
static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
return FlagsContinuation(condition, result);
}
// Creates a new flags continuation for a wasm trap.
static FlagsContinuation ForTrap(FlagsCondition condition, TrapId trap_id,
Node* result) {
return FlagsContinuation(condition, trap_id, result);
}
bool IsNone() const { return mode_ == kFlags_none; }
bool IsBranch() const {
return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
}
bool IsDeoptimize() const {
return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
}
bool IsPoisoned() const {
return mode_ == kFlags_branch_and_poison ||
mode_ == kFlags_deoptimize_and_poison;
}
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
FlagsCondition condition() const {
DCHECK(!IsNone());
return condition_;
}
DeoptimizeKind kind() const {
DCHECK(IsDeoptimize());
return kind_;
}
DeoptimizeReason reason() const {
DCHECK(IsDeoptimize());
return reason_;
}
FeedbackSource const& feedback() const {
DCHECK(IsDeoptimize());
return feedback_;
}
Node* frame_state() const {
DCHECK(IsDeoptimize());
return frame_state_or_result_;
}
Node* result() const {
DCHECK(IsSet());
return frame_state_or_result_;
}
TrapId trap_id() const {
DCHECK(IsTrap());
return trap_id_;
}
BasicBlock* true_block() const {
DCHECK(IsBranch());
return true_block_;
}
BasicBlock* false_block() const {
DCHECK(IsBranch());
return false_block_;
}
void Negate() {
DCHECK(!IsNone());
condition_ = NegateFlagsCondition(condition_);
}
void Commute() {
DCHECK(!IsNone());
condition_ = CommuteFlagsCondition(condition_);
}
void Overwrite(FlagsCondition condition) { condition_ = condition; }
void OverwriteAndNegateIfEqual(FlagsCondition condition) {
DCHECK(condition_ == kEqual || condition_ == kNotEqual);
bool negate = condition_ == kEqual;
condition_ = condition;
if (negate) Negate();
}
void OverwriteUnsignedIfSigned() {
switch (condition_) {
case kSignedLessThan:
condition_ = kUnsignedLessThan;
break;
case kSignedLessThanOrEqual:
condition_ = kUnsignedLessThanOrEqual;
break;
case kSignedGreaterThan:
condition_ = kUnsignedGreaterThan;
break;
case kSignedGreaterThanOrEqual:
condition_ = kUnsignedGreaterThanOrEqual;
break;
default:
break;
}
}
// Encodes this flags continuation into the given opcode.
InstructionCode Encode(InstructionCode opcode) {
opcode |= FlagsModeField::encode(mode_);
if (mode_ != kFlags_none) {
opcode |= FlagsConditionField::encode(condition_);
}
return opcode;
}
private:
FlagsContinuation(FlagsMode mode, FlagsCondition condition,
BasicBlock* true_block, BasicBlock* false_block)
: mode_(mode),
condition_(condition),
true_block_(true_block),
false_block_(false_block) {
DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
DCHECK_NOT_NULL(true_block);
DCHECK_NOT_NULL(false_block);
}
FlagsContinuation(FlagsMode mode, FlagsCondition condition,
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, Node* frame_state)
: mode_(mode),
condition_(condition),
kind_(kind),
reason_(reason),
feedback_(feedback),
frame_state_or_result_(frame_state) {
DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
DCHECK_NOT_NULL(frame_state);
}
FlagsContinuation(FlagsCondition condition, Node* result)
: mode_(kFlags_set),
condition_(condition),
frame_state_or_result_(result) {
DCHECK_NOT_NULL(result);
}
FlagsContinuation(FlagsCondition condition, TrapId trap_id, Node* result)
: mode_(kFlags_trap),
condition_(condition),
frame_state_or_result_(result),
trap_id_(trap_id) {
DCHECK_NOT_NULL(result);
}
FlagsMode const mode_;
FlagsCondition condition_;
DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize*
DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize*
FeedbackSource feedback_; // Only valid if mode_ == kFlags_deoptimize*
Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize*
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*.
BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch*.
TrapId trap_id_; // Only valid if mode_ == kFlags_trap.
};
// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter location in the call descriptor of the
// callee.
struct PushParameter {
PushParameter(Node* n = nullptr,
LinkageLocation l = LinkageLocation::ForAnyRegister())
: node(n), location(l) {}
Node* node;
LinkageLocation location;
};
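// An illustrative sketch of how PushParameter is typically used (hypothetical
// call-lowering snippet; {i} and {input_node} are placeholders): one entry is
// collected per argument and later handed to EmitPrepareArguments():
//   ZoneVector<PushParameter> arguments(zone());
//   arguments.push_back(
//       PushParameter(input_node, call_descriptor->GetInputLocation(i)));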
enum class FrameStateInputKind { kAny, kStackSlot };
// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
public:
// Forward declarations.
class Features;
enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
enum EnableScheduling { kDisableScheduling, kEnableScheduling };
enum EnableRootsRelativeAddressing {
kDisableRootsRelativeAddressing,
kEnableRootsRelativeAddressing
};
enum EnableSwitchJumpTable {
kDisableSwitchJumpTable,
kEnableSwitchJumpTable
};
enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };
InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
size_t* max_unoptimized_frame_height, size_t* max_pushed_argument_count,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
? kEnableScheduling
: kDisableScheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing =
kDisableRootsRelativeAddressing,
PoisoningMitigationLevel poisoning_level =
PoisoningMitigationLevel::kDontPoison,
EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
// Visit code for the entire graph with the included schedule.
bool SelectInstructions();
void StartBlock(RpoNumber rpo);
void EndBlock(RpoNumber rpo);
void AddInstruction(Instruction* instr);
void AddTerminator(Instruction* instr);
// ===========================================================================
// ============= Architecture-independent code emission methods. =============
// ===========================================================================
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, size_t temp_count = 0,
InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, size_t temp_count = 0,
InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, InstructionOperand d,
size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, InstructionOperand d,
InstructionOperand e, size_t temp_count = 0,
InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, InstructionOperand d,
InstructionOperand e, InstructionOperand f,
size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, size_t temp_count = 0,
InstructionOperand* temps = nullptr);
Instruction* Emit(Instruction* instr);
// Emit [0-3] operand instructions with no output, using the labels of the
// true and false blocks of the continuation.
Instruction* EmitWithContinuation(InstructionCode opcode,
FlagsContinuation* cont);
Instruction* EmitWithContinuation(InstructionCode opcode,
InstructionOperand a,
FlagsContinuation* cont);
Instruction* EmitWithContinuation(InstructionCode opcode,
InstructionOperand a, InstructionOperand b,
FlagsContinuation* cont);
Instruction* EmitWithContinuation(InstructionCode opcode,
InstructionOperand a, InstructionOperand b,
InstructionOperand c,
FlagsContinuation* cont);
Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs,
size_t input_count,
InstructionOperand* inputs,
FlagsContinuation* cont);
Instruction* EmitWithContinuation(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, size_t temp_count,
InstructionOperand* temps, FlagsContinuation* cont);
void EmitIdentity(Node* node);
// ===========================================================================
// ============== Architecture-independent CPU feature methods. ==============
// ===========================================================================
class Features final {
public:
Features() : bits_(0) {}
explicit Features(unsigned bits) : bits_(bits) {}
explicit Features(CpuFeature f) : bits_(1u << f) {}
Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}
bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }
private:
unsigned bits_;
};
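// An illustrative sketch (assuming an x64 build where the CpuFeature
// enumerator AVX exists): feature checks gate which instruction form is
// selected, e.g.
//   if (IsSupported(AVX)) {
//     // emit the VEX-encoded (three-operand) form
//   } else {
//     // emit the SSE (two-operand) form
//   }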
bool IsSupported(CpuFeature feature) const {
return features_.Contains(feature);
}
// Returns the features supported on the target platform.
static Features SupportedFeatures() {
return Features(CpuFeatures::SupportedFeatures());
}
// TODO(sigurds) This should take a CpuFeatures argument.
static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
bool NeedsPoisoning(IsSafetyCheck safety_check) const;
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
// Used in pattern matching during code generation.
// Check if {node} can be covered while generating code for the current
// instruction. A node can be covered if its {user} has the only edge to it
// and the two are in the same basic block.
bool CanCover(Node* user, Node* node) const;
// CanCover is not transitive. A counterexample: nodes A, B, C such that
// CanCover(A, B) and CanCover(B, C) hold and B is pure; the effect levels of
// A and B might still differ. CanCoverTransitively performs the additional
// checks.
bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const;
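// An illustrative counterexample (hypothetical IR, all nodes in one block):
//   a = Load(...); b = Word32And(a, 0xff); Store(...); c = Word32Equal(b, 0)
// CanCover(c, b) and CanCover(b, a) may both hold, but the Store raises the
// effect level between b and c, so the Load {a} must not be folded into the
// instruction selected for {c}; CanCoverTransitively(c, b, a) rejects this.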
// Used in pattern matching during code generation.
// This function checks that {node} and {user} are in the same basic block,
// and that {user} is the only user of {node} in this basic block. This
// check guarantees that there are no users of {node} scheduled between
// {node} and {user}, and thus we can select a single instruction for both
// nodes, if such an instruction exists. This check can be used for example
// when selecting instructions for:
// n = Int32Add(a, b)
// c = Word32Compare(n, 0, cond)
// Branch(c, true_label, false_label)
// Here we can generate a flag-setting add instruction, even if the add has
// uses in other basic blocks, since the flag-setting add instruction will
// still generate the result of the addition and not just set the flags.
// However, if we had uses of the add in the same basic block, we could have:
// n = Int32Add(a, b)
// o = OtherOp(n, ...)
// c = Word32Compare(n, 0, cond)
// Branch(c, true_label, false_label)
// where we cannot select the add and the compare together. If we were to
// select a flag-setting add instruction for Word32Compare and Int32Add while
// visiting Word32Compare, we would then have to select an instruction for
// OtherOp *afterwards*, which means we would attempt to use the result of
// the add before we have defined it.
bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;
// Checks if {node} was already defined, and therefore code was already
// generated for it.
bool IsDefined(Node* node) const;
// Checks if {node} has any uses, and therefore code has to be generated for
// it.
bool IsUsed(Node* node) const;
// Checks if {node} is currently live.
bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
// Gets the effect level of {node}.
int GetEffectLevel(Node* node) const;
// Gets the effect level of {node}, appropriately adjusted based on
// continuation flags if the node is a branch.
int GetEffectLevel(Node* node, FlagsContinuation* cont) const;
int GetVirtualRegister(const Node* node);
const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
// Check if we can generate loads and stores of ExternalConstants relative
// to the roots register.
bool CanAddressRelativeToRootsRegister(
const ExternalReference& reference) const;
// Check if we can use the roots register to access GC roots.
bool CanUseRootsRegister() const;
Isolate* isolate() const { return sequence()->isolate(); }
const ZoneVector<std::pair<int, int>>& instr_origins() const {
return instr_origins_;
}
private:
friend class OperandGenerator;
bool UseInstructionScheduling() const {
return (enable_scheduling_ == kEnableScheduling) &&
InstructionScheduler::SchedulerSupported();
}
void AppendDeoptimizeArguments(InstructionOperandVector* args,
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
Node* frame_state);
void EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand const& index_operand);
void EmitBinarySearchSwitch(const SwitchInfo& sw,
InstructionOperand const& value_operand);
void TryRename(InstructionOperand* op);
int GetRename(int virtual_register);
void SetRename(const Node* node, const Node* rename);
void UpdateRenames(Instruction* instruction);
void UpdateRenamesInPhi(PhiInstruction* phi);
// Inform the instruction selection that {node} was just defined.
void MarkAsDefined(Node* node);
// Inform the instruction selection that {node} has at least one use and we
// will need to generate code for it.
void MarkAsUsed(Node* node);
// Sets the effect level of {node}.
void SetEffectLevel(Node* node, int effect_level);
// Inform the register allocation of the representation of the value produced
// by {node}.
void MarkAsRepresentation(MachineRepresentation rep, Node* node);
void MarkAsWord32(Node* node) {
MarkAsRepresentation(MachineRepresentation::kWord32, node);
}
void MarkAsWord64(Node* node) {
MarkAsRepresentation(MachineRepresentation::kWord64, node);
}
void MarkAsFloat32(Node* node) {
MarkAsRepresentation(MachineRepresentation::kFloat32, node);
}
void MarkAsFloat64(Node* node) {
MarkAsRepresentation(MachineRepresentation::kFloat64, node);
}
void MarkAsSimd128(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
}
void MarkAsTagged(Node* node) {
MarkAsRepresentation(MachineRepresentation::kTagged, node);
}
void MarkAsCompressed(Node* node) {
MarkAsRepresentation(MachineRepresentation::kCompressed, node);
}
// Inform the register allocation of the representation of the unallocated
// operand {op}.
void MarkAsRepresentation(MachineRepresentation rep,
const InstructionOperand& op);
enum CallBufferFlag {
kCallCodeImmediate = 1u << 0,
kCallAddressImmediate = 1u << 1,
kCallTail = 1u << 2,
kCallFixedTargetRegister = 1u << 3
};
using CallBufferFlags = base::Flags<CallBufferFlag>;
// Initialize the call buffer with the InstructionOperands, nodes, etc.,
// corresponding to the inputs and outputs of the call.
// {call_code_immediate} to generate immediate operands to calls of code.
// {call_address_immediate} to generate immediate operands to address calls.
void InitializeCallBuffer(Node* call, CallBuffer* buffer,
CallBufferFlags flags, bool is_tail_call,
int stack_slot_delta = 0);
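// An illustrative sketch of combining the flags (mirrors typical call
// visitors; the surrounding code is elided):
//   CallBufferFlags flags(kCallCodeImmediate | kCallAddressImmediate);
//   InitializeCallBuffer(call, &buffer, flags, /*is_tail_call=*/false);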
bool IsTailCallAddressImmediate();
int GetTempsCountForTailCallFromJSFunction();
void UpdateMaxPushedArgumentCount(size_t count);
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
Node* state, OperandGenerator* g,
StateObjectDeduplicator* deduplicator,
InstructionOperandVector* inputs,
FrameStateInputKind kind, Zone* zone);
size_t AddInputsToFrameStateDescriptor(StateValueList* values,
InstructionOperandVector* inputs,
OperandGenerator* g,
StateObjectDeduplicator* deduplicator,
Node* node, FrameStateInputKind kind,
Zone* zone);
size_t AddOperandToStateValueDescriptor(StateValueList* values,
InstructionOperandVector* inputs,
OperandGenerator* g,
StateObjectDeduplicator* deduplicator,
Node* input, MachineType type,
FrameStateInputKind kind, Zone* zone);
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
// ===========================================================================
// Visit nodes in the given block and generate code.
void VisitBlock(BasicBlock* block);
// Visit the node for the control flow at the end of the block, generating
// code if necessary.
void VisitControl(BasicBlock* block);
// Visit the node and generate code, if any.
void VisitNode(Node* node);
// Visit the node and generate code for IEEE 754 functions.
void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
void VisitFloat64Ieee754Unop(Node*, InstructionCode code);
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
MACHINE_OP_LIST(DECLARE_GENERATOR)
MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
// Visit the load node with a value and opcode to replace with.
void VisitLoad(Node* node, Node* value, InstructionCode opcode);
void VisitLoadTransform(Node* node, Node* value, InstructionCode opcode);
void VisitFinishRegion(Node* node);
void VisitParameter(Node* node);
void VisitIfException(Node* node);
void VisitOsrValue(Node* node);
void VisitPhi(Node* node);
void VisitProjection(Node* node);
void VisitConstant(Node* node);
void VisitCall(Node* call, BasicBlock* handler = nullptr);
void VisitDeoptimizeIf(Node* node);
void VisitDeoptimizeUnless(Node* node);
void VisitTrapIf(Node* node, TrapId trap_id);
void VisitTrapUnless(Node* node, TrapId trap_id);
void VisitTailCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback, Node* frame_state);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
void VisitRetain(Node* node);
void VisitUnreachable(Node* node);
void VisitStaticAssert(Node* node);
void VisitDeadValue(Node* node);
void VisitStackPointerGreaterThan(Node* node, FlagsContinuation* cont);
void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
void EmitWordPoisonOnSpeculation(Node* node);
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* call_descriptor, Node* node);
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
const CallDescriptor* call_descriptor, Node* node);
bool CanProduceSignalingNaN(Node* node);
// ===========================================================================
// ============= Vector instruction (SIMD) helper fns. =======================
// ===========================================================================
// Canonicalize shuffles to make pattern matching simpler. Returns the shuffle
// indices, and a boolean indicating if the shuffle is a swizzle (one input).
void CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle);
// Swaps the two first input operands of the node, to help match shuffles
// to specific architectural instructions.
void SwapShuffleInputs(Node* node);
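// An illustrative sketch (hypothetical SIMD shuffle visitor): a backend first
// canonicalizes the indices and then pattern-matches them:
//   uint8_t shuffle[kSimd128Size];
//   bool is_swizzle;
//   CanonicalizeShuffle(node, shuffle, &is_swizzle);
//   if (is_swizzle) { /* single-input shuffle: match swizzle patterns */ }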
// ===========================================================================
Schedule* schedule() const { return schedule_; }
Linkage* linkage() const { return linkage_; }
InstructionSequence* sequence() const { return sequence_; }
Zone* instruction_zone() const { return sequence()->zone(); }
Zone* zone() const { return zone_; }
void set_instruction_selection_failed() {
instruction_selection_failed_ = true;
}
bool instruction_selection_failed() { return instruction_selection_failed_; }
void MarkPairProjectionsAsWord32(Node* node);
bool IsSourcePositionUsed(Node* node);
void VisitWord32AtomicBinaryOperation(Node* node, ArchOpcode int8_op,
ArchOpcode uint8_op,
ArchOpcode int16_op,
ArchOpcode uint16_op,
ArchOpcode word32_op);
void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
ArchOpcode uint16_op,
ArchOpcode uint32_op,
ArchOpcode uint64_op);
void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
ArchOpcode uint16_op, ArchOpcode uint32_op);
#if V8_TARGET_ARCH_64_BIT
bool ZeroExtendsWord32ToWord64(Node* node, int recursion_depth = 0);
bool ZeroExtendsWord32ToWord64NoPhis(Node* node);
enum Upper32BitsState : uint8_t {
kNotYetChecked,
kUpperBitsGuaranteedZero,
kNoGuarantee,
};
#endif // V8_TARGET_ARCH_64_BIT
// ===========================================================================
Zone* const zone_;
Linkage* const linkage_;
InstructionSequence* const sequence_;
SourcePositionTable* const source_positions_;
SourcePositionMode const source_position_mode_;
Features features_;
Schedule* const schedule_;
BasicBlock* current_block_;
ZoneVector<Instruction*> instructions_;
InstructionOperandVector continuation_inputs_;
InstructionOperandVector continuation_outputs_;
InstructionOperandVector continuation_temps_;
BoolVector defined_;
BoolVector used_;
IntVector effect_level_;
IntVector virtual_registers_;
IntVector virtual_register_rename_;
InstructionScheduler* scheduler_;
EnableScheduling enable_scheduling_;
EnableRootsRelativeAddressing enable_roots_relative_addressing_;
EnableSwitchJumpTable enable_switch_jump_table_;
PoisoningMitigationLevel poisoning_level_;
Frame* frame_;
bool instruction_selection_failed_;
ZoneVector<std::pair<int, int>> instr_origins_;
EnableTraceTurboJson trace_turbo_;
TickCounter* const tick_counter_;
// Store the maximal unoptimized frame height and the maximal number of pushed
// arguments (for calls). Both are later used to apply an offset to stack
// checks.
size_t* max_unoptimized_frame_height_;
size_t* max_pushed_argument_count_;
#if V8_TARGET_ARCH_64_BIT
// Holds lazily-computed results for whether phi nodes guarantee their upper
// 32 bits to be zero. Indexed by node ID; nobody reads or writes the values
// for non-phi nodes.
ZoneVector<Upper32BitsState> phi_states_;
#endif
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_