// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_
#define V8_COMPILER_BACKEND_CODE_GENERATOR_H_

#include <memory>

#include "src/base/optional.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/source-position-table.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/unwinding-info-writer.h"
#include "src/compiler/osr.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/code-kind.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/trap-handler/trap-handler.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8::internal::wasm {
class AssemblerBufferCache;
}

namespace v8::internal::compiler {

// Forward declarations.
class DeoptimizationExit;
class FrameAccessState;
class Linkage;
class OutOfLineCode;

struct BranchInfo {
  FlagsCondition condition;
  Label* true_label;
  Label* false_label;
  bool fallthru;
};
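
// A hedged sketch of how BranchInfo is typically consumed by the
// architecture-specific branch assemblers (assuming the usual layout where,
// when {fallthru} is true, one target is the next block in assembly order):
//
//   // emit a conditional jump on {condition} to {true_label};
//   // if (!fallthru) also emit an unconditional jump to {false_label}.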

class InstructionOperandIterator {
 public:
  InstructionOperandIterator(Instruction* instr, size_t pos)
      : instr_(instr), pos_(pos) {}

  Instruction* instruction() const { return instr_; }
  InstructionOperand* Advance() { return instr_->InputAt(pos_++); }

 private:
  Instruction* instr_;
  size_t pos_;
};
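
// Usage sketch (hypothetical): walking an instruction's inputs in order, as
// the deoptimization translation methods below do.
//
//   InstructionOperandIterator it(instr, 0);
//   InstructionOperand* op0 = it.Advance();  // instr->InputAt(0)
//   InstructionOperand* op1 = it.Advance();  // instr->InputAt(1)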

enum class DeoptimizationLiteralKind {
  kObject,
  kNumber,
  kSignedBigInt64,
  kUnsignedBigInt64,
  kInvalid
};

// A non-null Handle<Object>, a double, an int64_t, or a uint64_t.
class DeoptimizationLiteral {
 public:
  DeoptimizationLiteral()
      : kind_(DeoptimizationLiteralKind::kInvalid), object_() {}
  explicit DeoptimizationLiteral(Handle<Object> object)
      : kind_(DeoptimizationLiteralKind::kObject), object_(object) {
    CHECK(!object_.is_null());
  }
  explicit DeoptimizationLiteral(double number)
      : kind_(DeoptimizationLiteralKind::kNumber), number_(number) {}
  explicit DeoptimizationLiteral(int64_t signed_bigint64)
      : kind_(DeoptimizationLiteralKind::kSignedBigInt64),
        signed_bigint64_(signed_bigint64) {}
  explicit DeoptimizationLiteral(uint64_t unsigned_bigint64)
      : kind_(DeoptimizationLiteralKind::kUnsignedBigInt64),
        unsigned_bigint64_(unsigned_bigint64) {}

  Handle<Object> object() const { return object_; }

  bool operator==(const DeoptimizationLiteral& other) const {
    if (kind_ != other.kind_) {
      return false;
    }
    switch (kind_) {
      case DeoptimizationLiteralKind::kObject:
        return object_.equals(other.object_);
      case DeoptimizationLiteralKind::kNumber:
        return base::bit_cast<uint64_t>(number_) ==
               base::bit_cast<uint64_t>(other.number_);
      case DeoptimizationLiteralKind::kSignedBigInt64:
        return signed_bigint64_ == other.signed_bigint64_;
      case DeoptimizationLiteralKind::kUnsignedBigInt64:
        return unsigned_bigint64_ == other.unsigned_bigint64_;
      case DeoptimizationLiteralKind::kInvalid:
        return true;
    }
    UNREACHABLE();
  }

  Handle<Object> Reify(Isolate* isolate) const;

  void Validate() const {
    CHECK_NE(kind_, DeoptimizationLiteralKind::kInvalid);
  }

  DeoptimizationLiteralKind kind() const {
    Validate();
    return kind_;
  }

 private:
  DeoptimizationLiteralKind kind_;

  union {
    Handle<Object> object_;
    double number_;
    int64_t signed_bigint64_;
    uint64_t unsigned_bigint64_;
  };
};
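
// Usage sketch (hypothetical values): literals of the same kind compare by
// value (numbers by bit pattern); literals of different kinds never compare
// equal.
//
//   DeoptimizationLiteral a(0.5);         // kNumber
//   DeoptimizationLiteral b(0.5);         // kNumber
//   DeoptimizationLiteral c(int64_t{1});  // kSignedBigInt64
//   DCHECK(a == b);
//   DCHECK(!(a == c));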

// These structs hold pc offsets for generated instructions and are only used
// when tracing for turbolizer is enabled.
struct TurbolizerCodeOffsetsInfo {
  int code_start_register_check = -1;
  int deopt_check = -1;
  int blocks_start = -1;
  int out_of_line_code = -1;
  int deoptimization_exits = -1;
  int pools = -1;
  int jump_tables = -1;
};

struct TurbolizerInstructionStartInfo {
  int gap_pc_offset = -1;
  int arch_instr_pc_offset = -1;
  int condition_pc_offset = -1;
};

// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
 public:
  explicit CodeGenerator(
      Zone* codegen_zone, Frame* frame, Linkage* linkage,
      InstructionSequence* instructions, OptimizedCompilationInfo* info,
      Isolate* isolate, base::Optional<OsrHelper> osr_helper,
      int start_source_position, JumpOptimizationInfo* jump_opt,
      const AssemblerOptions& options, wasm::AssemblerBufferCache* buffer_cache,
      Builtin builtin, size_t max_unoptimized_frame_height,
      size_t max_pushed_argument_count, const char* debug_name = nullptr);

  // Generate native code. After calling AssembleCode, call FinalizeCode to
  // produce the actual code object. If an error occurs during either phase,
  // FinalizeCode returns an empty MaybeHandle.
  void AssembleCode();  // Does not need to run on main thread.
  MaybeHandle<Code> FinalizeCode();
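
  // A minimal sketch of the intended two-phase use (hypothetical caller):
  //
  //   CodeGenerator generator(/* arguments as above */);
  //   generator.AssembleCode();  // phase 1; may run off the main thread
  //   MaybeHandle<Code> code = generator.FinalizeCode();  // phase 2
  //   if (code.is_null()) { /* an error occurred in one of the phases */ }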

  base::OwnedVector<byte> GetSourcePositionTable();
  base::OwnedVector<byte> GetProtectedInstructionsData();

  InstructionSequence* instructions() const { return instructions_; }
  FrameAccessState* frame_access_state() const { return frame_access_state_; }
  const Frame* frame() const { return frame_access_state_->frame(); }
  Isolate* isolate() const { return isolate_; }
  Linkage* linkage() const { return linkage_; }

  Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }

  void AddProtectedInstructionLanding(uint32_t instr_offset,
                                      uint32_t landing_offset);

  bool wasm_runtime_exception_support() const;

  SourcePosition start_source_position() const {
    return start_source_position_;
  }

  void AssembleSourcePosition(Instruction* instr);
  void AssembleSourcePosition(SourcePosition source_position);

  // Record a safepoint with the given pointer map.
  void RecordSafepoint(ReferenceMap* references);

  Zone* zone() const { return zone_; }
  TurboAssembler* tasm() { return &tasm_; }
  SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
  size_t handler_table_offset() const { return handler_table_offset_; }

  const ZoneVector<int>& block_starts() const { return block_starts_; }
  const ZoneVector<TurbolizerInstructionStartInfo>& instr_starts() const {
    return instr_starts_;
  }

  const TurbolizerCodeOffsetsInfo& offsets_info() const {
    return offsets_info_;
  }

  static constexpr int kBinarySearchSwitchMinimalCases = 4;

  // Returns true if an offset should be applied to the given stack check. There
  // are two reasons that this could happen:
  // 1. The optimized frame is smaller than the corresponding deoptimized frames
  //    and an offset must be applied in order to be able to deopt safely.
  // 2. The current function pushes a large number of arguments to the stack.
  //    These are not accounted for by the initial frame setup.
  bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset);
  uint32_t GetStackCheckOffset();
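
  // Call pattern sketch (hypothetical, at stack-check assembly time):
  //
  //   uint32_t offset;
  //   if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
  //     // compare sp against (stack limit + offset) rather than the limit
  //   }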

  CodeKind code_kind() const { return info_->code_kind(); }

 private:
  GapResolver* resolver() { return &resolver_; }
  SafepointTableBuilder* safepoints() { return &safepoints_; }
  OptimizedCompilationInfo* info() const { return info_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }

  // Create the FrameAccessState object. The Frame is immutable from here on.
  void CreateFrameAccessState(Frame* frame);

  // Architecture-specific frame finalization.
  void FinishFrame(Frame* frame);

  // Checks if {block} will appear directly after {current_block_} when
  // assembling code, in which case a fall-through can be used.
  bool IsNextInAssemblyOrder(RpoNumber block) const;

  // Check if a heap object can be materialized by loading from a heap root,
  // which is cheaper on some platforms than materializing the actual heap
  // object constant.
  bool IsMaterializableFromRoot(Handle<HeapObject> object,
                                RootIndex* index_return);
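
  // Typical use (sketch): prefer a cheap root-list load over embedding the
  // heap constant.
  //
  //   RootIndex index;
  //   if (IsMaterializableFromRoot(object, &index)) {
  //     // load the root at {index} instead of materializing {object}
  //   }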

  enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

  // Assemble instructions for the specified block.
  CodeGenResult AssembleBlock(const InstructionBlock* block);

  // Assemble code for the specified instruction.
  CodeGenResult AssembleInstruction(int instruction_index,
                                    const InstructionBlock* block);
  void AssembleGaps(Instruction* instr);

  // Computes branch info from the given instruction. Returns a valid rpo
  // number if the branch is redundant; in that case the returned rpo number
  // points to the target basic block.
  RpoNumber ComputeBranchInfo(BranchInfo* branch, Instruction* instr);

  // Returns true if an instruction is a tail call that needs to adjust the
  // stack pointer before execution. The index of the empty stack slot above
  // the adjusted stack pointer is returned in |slot|.
  bool GetSlotAboveSPBeforeTailCall(Instruction* instr, int* slot);
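
  // Call pattern sketch (hypothetical):
  //
  //   int slot;
  //   if (GetSlotAboveSPBeforeTailCall(instr, &slot)) {
  //     // adjust sp so that {slot} is the empty slot just above it
  //   }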

  // Determines how to call helper stubs depending on the code kind.
  StubCallMode DetermineStubCallMode() const;

  CodeGenResult AssembleDeoptimizerCall(DeoptimizationExit* exit);

  // ===========================================================================
  // ============= Architecture-specific code generation methods. ==============
  // ===========================================================================

  CodeGenResult AssembleArchInstruction(Instruction* instr);
  void AssembleArchJump(RpoNumber target);
  void AssembleArchJumpRegardlessOfAssemblyOrder(RpoNumber target);
  void AssembleArchBranch(Instruction* instr, BranchInfo* branch);

  // Generates special branch for deoptimization condition.
  void AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch);

  void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
  void AssembleArchSelect(Instruction* instr, FlagsCondition condition);
#if V8_ENABLE_WEBASSEMBLY
  void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
#endif  // V8_ENABLE_WEBASSEMBLY
  void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
                                           std::pair<int32_t, Label*>* begin,
                                           std::pair<int32_t, Label*>* end);
  void AssembleArchBinarySearchSwitch(Instruction* instr);
  void AssembleArchTableSwitch(Instruction* instr);

  // Generates code that checks whether the {kJavaScriptCallCodeStartRegister}
  // contains the expected pointer to the start of the instruction stream.
  void AssembleCodeStartRegisterCheck();

  // When entering code that is marked for deoptimization, rather than
  // continuing with its execution, we jump to lazily compiled code. We need
  // to do this because this code has already been deoptimized and needs to
  // be unlinked from the JS functions referring to it.
  void BailoutIfDeoptimized();

  // Generates an architecture-specific, descriptor-specific prologue
  // to set up a stack frame.
  void AssembleConstructFrame();

  // Generates an architecture-specific, descriptor-specific return sequence
  // to tear down a stack frame.
  void AssembleReturn(InstructionOperand* pop);

  void AssembleDeconstructFrame();

  // Generates code to manipulate the stack in preparation for a tail call.
  void AssemblePrepareTailCall();

  enum PushTypeFlag {
    kImmediatePush = 0x1,
    kRegisterPush = 0x2,
    kStackSlotPush = 0x4,
    kScalarPush = kRegisterPush | kStackSlotPush
  };

  using PushTypeFlags = base::Flags<PushTypeFlag>;

  static bool IsValidPush(InstructionOperand source, PushTypeFlags push_type);

  // Generates a list of moves from an instruction that are candidates to be
  // turned into push instructions on platforms that support them. In general,
  // the push candidates are moves to a set of contiguous destination
  // InstructionOperand locations on the stack that neither clobber values
  // needed to resolve the gap nor use values generated by the gap, i.e. moves
  // that can be hoisted together before the actual gap and assembled together.
  static void GetPushCompatibleMoves(Instruction* instr,
                                     PushTypeFlags push_type,
                                     ZoneVector<MoveOperands*>* pushes);
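
  // Usage sketch (hypothetical): collect push candidates for an instruction
  // and assemble them as pushes ahead of the remaining gap moves.
  //
  //   ZoneVector<MoveOperands*> pushes(zone());
  //   GetPushCompatibleMoves(instr, PushTypeFlags(kScalarPush), &pushes);
  //   for (MoveOperands* move : pushes) { /* emit a push for {move} */ }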

  class MoveType {
   public:
    enum Type {
      kRegisterToRegister,
      kRegisterToStack,
      kStackToRegister,
      kStackToStack,
      kConstantToRegister,
      kConstantToStack
    };

    // Detects what type of move or swap needs to be performed. Note that
    // these functions do not take into account the representation (Tagged,
    // FP, etc.).

    static Type InferMove(InstructionOperand* source,
                          InstructionOperand* destination);
    static Type InferSwap(InstructionOperand* source,
                          InstructionOperand* destination);
  };
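
  // Dispatch sketch (hypothetical): AssembleMove-style implementations
  // typically switch on the inferred type.
  //
  //   switch (MoveType::InferMove(source, destination)) {
  //     case MoveType::kRegisterToStack:
  //       // emit a store from the register into the stack slot
  //       break;
  //     // ... remaining cases ...
  //   }
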
  // Called before a tail call |instr|'s gap moves are assembled; allows
  // gap-specific pre-processing, e.g. adjusting the sp for tail calls that
  // need it before the gap moves, or converting certain gap moves into
  // pushes.
  void AssembleTailCallBeforeGap(Instruction* instr,
                                 int first_unused_stack_slot);
  // Called after a tail call |instr|'s gap moves are assembled; allows
  // gap-specific post-processing, e.g. adjusting the sp for tail calls that
  // need it after the gap moves.
  void AssembleTailCallAfterGap(Instruction* instr,
                                int first_unused_stack_slot);

  void FinishCode();
  void MaybeEmitOutOfLineConstantPool();

  void IncrementStackAccessCounter(InstructionOperand* source,
                                   InstructionOperand* destination);

  // ===========================================================================
  // ============== Architecture-specific gap resolver methods. ================
  // ===========================================================================

  // Interface used by the gap resolver to emit moves and swaps.
  void AssembleMove(InstructionOperand* source,
                    InstructionOperand* destination) final;
  void AssembleSwap(InstructionOperand* source,
                    InstructionOperand* destination) final;
  void MoveToTempLocation(InstructionOperand* src) final;
  void MoveTempLocationTo(InstructionOperand* dst,
                          MachineRepresentation rep) final;
  void SetPendingMove(MoveOperands* move) final;

  // ===========================================================================
  // =================== Jump table construction methods. ======================
  // ===========================================================================

  class JumpTable;
  // Adds a jump table that is emitted after the actual code.  Returns a
  // label pointing to the beginning of the table.  {targets} is assumed to
  // be static or zone allocated.
  Label* AddJumpTable(Label** targets, size_t target_count);
  // Emits a jump table.
  void AssembleJumpTable(Label** targets, size_t target_count);
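
  // Usage sketch (hypothetical): emit an indexed jump through a table of
  // block labels; the table itself is emitted after the function body.
  //
  //   Label* table = AddJumpTable(targets, target_count);
  //   // ... architecture-specific sequence jumping to table[input] ...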

  // ===========================================================================
  // ================== Deoptimization table construction. =====================
  // ===========================================================================

  void RecordCallPosition(Instruction* instr);
  Handle<DeoptimizationData> GenerateDeoptimizationData();
  int DefineDeoptimizationLiteral(DeoptimizationLiteral literal);
  DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
                                                    size_t frame_state_offset);
  DeoptimizationExit* BuildTranslation(Instruction* instr, int pc_offset,
                                       size_t frame_state_offset,
                                       size_t immediate_args_count,
                                       OutputFrameStateCombine state_combine);
  void BuildTranslationForFrameStateDescriptor(
      FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
      OutputFrameStateCombine state_combine);
  void TranslateStateValueDescriptor(StateValueDescriptor* desc,
                                     StateValueList* nested,
                                     InstructionOperandIterator* iter);
  void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
                                             InstructionOperandIterator* iter);
  void AddTranslationForOperand(Instruction* instr, InstructionOperand* op,
                                MachineType type);
  void MarkLazyDeoptSite();

  void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
  DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
                                            size_t frame_state_offset,
                                            size_t immediate_args_count);

  // ===========================================================================

  struct HandlerInfo {
    Label* handler;
    int pc_offset;
  };

  friend class OutOfLineCode;
  friend class CodeGeneratorTester;

  Zone* zone_;
  Isolate* isolate_;
  FrameAccessState* frame_access_state_;
  Linkage* const linkage_;
  InstructionSequence* const instructions_;
  UnwindingInfoWriter unwinding_info_writer_;
  OptimizedCompilationInfo* const info_;
  Label* const labels_;
  Label return_label_;
  RpoNumber current_block_;
  SourcePosition start_source_position_;
  SourcePosition current_source_position_;
  TurboAssembler tasm_;
  GapResolver resolver_;
  SafepointTableBuilder safepoints_;
  ZoneVector<HandlerInfo> handlers_;
  int next_deoptimization_id_ = 0;
  int deopt_exit_start_offset_ = 0;
  int eager_deopt_count_ = 0;
  int lazy_deopt_count_ = 0;
  ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
  ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
  size_t inlined_function_count_ = 0;
  TranslationArrayBuilder translations_;
  int handler_table_offset_ = 0;
  int last_lazy_deopt_pc_ = 0;

  // Deoptimization exits must be as small as possible, since their count grows
  // with function size. {jump_deoptimization_entry_labels_} is an optimization
  // to that effect, which extracts the (potentially large) instruction
  // sequence for the final jump to the deoptimization entry into a single spot
  // per Code object. All deopt exits can then near-call to this label. Note:
  // not used on all architectures.
  Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];

  // The maximal combined height of all frames produced upon deoptimization, and
  // the maximal number of pushed arguments for function calls. Applied as an
  // offset to the first stack check of an optimized function.
  const size_t max_unoptimized_frame_height_;
  const size_t max_pushed_argument_count_;

  // kArchCallCFunction could be reached either:
  //   kArchCallCFunction;
  // or:
  //   kArchSaveCallerRegisters;
  //   kArchCallCFunction;
  //   kArchRestoreCallerRegisters;
  // The boolean is used to distinguish the two cases. In the latter case, we
  // also need to decide if FP registers need to be saved, which is controlled
  // by fp_mode_.
  bool caller_registers_saved_;
  SaveFPRegsMode fp_mode_;

  JumpTable* jump_tables_;
  OutOfLineCode* ools_;
  base::Optional<OsrHelper> osr_helper_;
  int osr_pc_offset_;
  SourcePositionTableBuilder source_position_table_builder_;
#if V8_ENABLE_WEBASSEMBLY
  ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
#endif  // V8_ENABLE_WEBASSEMBLY
  CodeGenResult result_;
  ZoneVector<int> block_starts_;
  TurbolizerCodeOffsetsInfo offsets_info_;
  ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;
  MoveCycleState move_cycle_;

  const char* debug_name_ = nullptr;
};

}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_BACKEND_CODE_GENERATOR_H_