Diffstat (limited to 'deps/v8/src/compiler/instruction-selector.cc')
-rw-r--r-- | deps/v8/src/compiler/instruction-selector.cc | 436
1 file changed, 345 insertions, 91 deletions
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index eac5571e9c..86868e59ee 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -12,6 +12,7 @@
 #include "src/compiler/pipeline.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/state-values-utils.h"
+#include "src/deoptimizer.h"

 namespace v8 {
 namespace internal {
@@ -29,12 +30,13 @@ InstructionSelector::InstructionSelector(
       source_position_mode_(source_position_mode),
       features_(features),
       schedule_(schedule),
-      current_block_(NULL),
+      current_block_(nullptr),
       instructions_(zone),
       defined_(node_count, false, zone),
       used_(node_count, false, zone),
       virtual_registers_(node_count,
-                         InstructionOperand::kInvalidVirtualRegister, zone) {
+                         InstructionOperand::kInvalidVirtualRegister, zone),
+      scheduler_(nullptr) {
   instructions_.reserve(node_count);
 }
@@ -61,17 +63,55 @@ void InstructionSelector::SelectInstructions() {
   }

   // Schedule the selected instructions.
+  if (FLAG_turbo_instruction_scheduling &&
+      InstructionScheduler::SchedulerSupported()) {
+    scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
+  }
+
   for (auto const block : *blocks) {
     InstructionBlock* instruction_block =
         sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
     size_t end = instruction_block->code_end();
     size_t start = instruction_block->code_start();
     DCHECK_LE(end, start);
-    sequence()->StartBlock(RpoNumber::FromInt(block->rpo_number()));
+    StartBlock(RpoNumber::FromInt(block->rpo_number()));
     while (start-- > end) {
-      sequence()->AddInstruction(instructions_[start]);
+      AddInstruction(instructions_[start]);
     }
-    sequence()->EndBlock(RpoNumber::FromInt(block->rpo_number()));
+    EndBlock(RpoNumber::FromInt(block->rpo_number()));
+  }
+}
+
+
+void InstructionSelector::StartBlock(RpoNumber rpo) {
+  if (FLAG_turbo_instruction_scheduling &&
+      InstructionScheduler::SchedulerSupported()) {
+    DCHECK_NOT_NULL(scheduler_);
+    scheduler_->StartBlock(rpo);
+  } else {
+    sequence()->StartBlock(rpo);
+  }
+}
+
+
+void InstructionSelector::EndBlock(RpoNumber rpo) {
+  if (FLAG_turbo_instruction_scheduling &&
+      InstructionScheduler::SchedulerSupported()) {
+    DCHECK_NOT_NULL(scheduler_);
+    scheduler_->EndBlock(rpo);
+  } else {
+    sequence()->EndBlock(rpo);
+  }
+}
+
+
+void InstructionSelector::AddInstruction(Instruction* instr) {
+  if (FLAG_turbo_instruction_scheduling &&
+      InstructionScheduler::SchedulerSupported()) {
+    DCHECK_NOT_NULL(scheduler_);
+    scheduler_->AddInstruction(instr);
+  } else {
+    sequence()->AddInstruction(instr);
   }
 }
@@ -81,7 +121,7 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                        size_t temp_count,
                                        InstructionOperand* temps) {
   size_t output_count = output.IsInvalid() ? 0 : 1;
-  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+  return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
 }
@@ -240,16 +280,15 @@ void InstructionSelector::MarkAsUsed(Node* node) {
 }


-void InstructionSelector::MarkAsRepresentation(MachineType rep,
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                                const InstructionOperand& op) {
   UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
-  rep = RepresentationOf(rep);
   sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
 }


-void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
-  rep = RepresentationOf(rep);
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+                                               Node* node) {
   sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
 }
@@ -268,6 +307,9 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
     case IrOpcode::kFloat64Constant:
     case IrOpcode::kHeapConstant:
       return g->UseImmediate(input);
+    case IrOpcode::kObjectState:
+      UNREACHABLE();
+      break;
     default:
       switch (kind) {
         case FrameStateInputKind::kStackSlot:
@@ -275,21 +317,94 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
         case FrameStateInputKind::kAny:
           return g->UseAny(input);
       }
-      UNREACHABLE();
-      return InstructionOperand();
+  }
+  UNREACHABLE();
+  return InstructionOperand();
+}
+
+
+class StateObjectDeduplicator {
+ public:
+  explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
+  static const size_t kNotDuplicated = SIZE_MAX;
+
+  size_t GetObjectId(Node* node) {
+    for (size_t i = 0; i < objects_.size(); ++i) {
+      if (objects_[i] == node) {
+        return i;
+      }
+    }
+    return kNotDuplicated;
+  }
+
+  size_t InsertObject(Node* node) {
+    size_t id = objects_.size();
+    objects_.push_back(node);
+    return id;
+  }
+
+ private:
+  ZoneVector<Node*> objects_;
+};
+
+
+// Returns the number of instruction operands added to inputs.
+size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
+                                        InstructionOperandVector* inputs,
+                                        OperandGenerator* g,
+                                        StateObjectDeduplicator* deduplicator,
+                                        Node* input, MachineType type,
+                                        FrameStateInputKind kind, Zone* zone) {
+  switch (input->opcode()) {
+    case IrOpcode::kObjectState: {
+      size_t id = deduplicator->GetObjectId(input);
+      if (id == StateObjectDeduplicator::kNotDuplicated) {
+        size_t entries = 0;
+        id = deduplicator->InsertObject(input);
+        descriptor->fields().push_back(
+            StateValueDescriptor::Recursive(zone, id));
+        StateValueDescriptor* new_desc = &descriptor->fields().back();
+        for (Edge edge : input->input_edges()) {
+          entries += AddOperandToStateValueDescriptor(
+              new_desc, inputs, g, deduplicator, edge.to(),
+              MachineType::AnyTagged(), kind, zone);
+        }
+        return entries;
+      } else {
+        // Crankshaft counts duplicate objects for the running id, so we have
+        // to push the input again.
+        deduplicator->InsertObject(input);
+        descriptor->fields().push_back(
+            StateValueDescriptor::Duplicate(zone, id));
+        return 0;
+      }
+      break;
+    }
+    default: {
+      inputs->push_back(OperandForDeopt(g, input, kind));
+      descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
+      return 1;
+    }
   }
 }


-void AddFrameStateInputs(Node* state, OperandGenerator* g,
-                         InstructionOperandVector* inputs,
-                         FrameStateDescriptor* descriptor,
-                         FrameStateInputKind kind, Zone* zone) {
+// Returns the number of instruction operands added to inputs.
+size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
+                                       Node* state, OperandGenerator* g,
+                                       StateObjectDeduplicator* deduplicator,
+                                       InstructionOperandVector* inputs,
+                                       FrameStateInputKind kind, Zone* zone) {
   DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());

+  size_t entries = 0;
+  size_t initial_size = inputs->size();
+  USE(initial_size);  // initial_size is only used for debug.
+
   if (descriptor->outer_state()) {
-    AddFrameStateInputs(state->InputAt(kFrameStateOuterStateInput), g, inputs,
-                        descriptor->outer_state(), kind, zone);
+    entries += AddInputsToFrameStateDescriptor(
+        descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
+        g, deduplicator, inputs, kind, zone);
   }

   Node* parameters = state->InputAt(kFrameStateParametersInput);
@@ -303,30 +418,34 @@ void AddFrameStateInputs(Node* state, OperandGenerator* g,
   DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
   DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());

-  ZoneVector<MachineType> types(zone);
-  types.reserve(descriptor->GetSize());
-
-  size_t value_index = 0;
-  inputs->push_back(OperandForDeopt(g, function, kind));
-  descriptor->SetType(value_index++, kMachAnyTagged);
+  StateValueDescriptor* values_descriptor =
+      descriptor->GetStateValueDescriptor();
+  entries += AddOperandToStateValueDescriptor(
+      values_descriptor, inputs, g, deduplicator, function,
+      MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
   for (StateValuesAccess::TypedNode input_node :
        StateValuesAccess(parameters)) {
-    inputs->push_back(OperandForDeopt(g, input_node.node, kind));
-    descriptor->SetType(value_index++, input_node.type);
+    entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+                                                deduplicator, input_node.node,
+                                                input_node.type, kind, zone);
   }
   if (descriptor->HasContext()) {
-    inputs->push_back(OperandForDeopt(g, context, kind));
-    descriptor->SetType(value_index++, kMachAnyTagged);
+    entries += AddOperandToStateValueDescriptor(
+        values_descriptor, inputs, g, deduplicator, context,
+        MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
   }
   for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
-    inputs->push_back(OperandForDeopt(g, input_node.node, kind));
-    descriptor->SetType(value_index++, input_node.type);
+    entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+                                                deduplicator, input_node.node,
+                                                input_node.type, kind, zone);
   }
   for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
-    inputs->push_back(OperandForDeopt(g, input_node.node, kind));
-    descriptor->SetType(value_index++, input_node.type);
+    entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+                                                deduplicator, input_node.node,
+                                                input_node.type, kind, zone);
   }
-  DCHECK(value_index == descriptor->GetSize());
+  DCHECK_EQ(initial_size + entries, inputs->size());
+  return entries;
 }

 }  // namespace
@@ -356,14 +475,14 @@ struct CallBuffer {
   NodeVector output_nodes;
   InstructionOperandVector outputs;
   InstructionOperandVector instruction_args;
-  NodeVector pushed_nodes;
+  ZoneVector<PushParameter> pushed_nodes;

   size_t input_count() const { return descriptor->InputCount(); }

   size_t frame_state_count() const { return descriptor->FrameStateCount(); }

   size_t frame_state_value_count() const {
-    return (frame_state_descriptor == NULL)
+    return (frame_state_descriptor == nullptr)
               ? 0
               : (frame_state_descriptor->GetTotalSize() +
                  1);  // Include deopt id.
@@ -374,8 +493,8 @@ struct CallBuffer {
 // TODO(bmeurer): Get rid of the CallBuffer business and make
 // InstructionSelector::VisitCall platform independent instead.
 void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
-                                               bool call_code_immediate,
-                                               bool call_address_immediate) {
+                                               CallBufferFlags flags,
+                                               int stack_param_delta) {
   OperandGenerator g(this);
   DCHECK_LE(call->op()->ValueOutputCount(),
             static_cast<int>(buffer->descriptor->ReturnCount()));
@@ -400,13 +519,13 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
     // Filter out the outputs that aren't live because no projection uses them.
     size_t outputs_needed_by_framestate =
-        buffer->frame_state_descriptor == NULL
+        buffer->frame_state_descriptor == nullptr
             ? 0
             : buffer->frame_state_descriptor->state_combine()
                  .ConsumedOutputCount();
     for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
-      bool output_is_live =
-          buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
+      bool output_is_live = buffer->output_nodes[i] != nullptr ||
+                            i < outputs_needed_by_framestate;
       if (output_is_live) {
         MachineType type =
             buffer->descriptor->GetReturnType(static_cast<int>(i));
@@ -415,9 +534,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,

         Node* output = buffer->output_nodes[i];
         InstructionOperand op =
-            output == NULL ? g.TempLocation(location, type)
-                           : g.DefineAsLocation(output, location, type);
-        MarkAsRepresentation(type, op);
+            output == nullptr
+                ? g.TempLocation(location, type.representation())
+                : g.DefineAsLocation(output, location, type.representation());
+        MarkAsRepresentation(type.representation(), op);

         buffer->outputs.push_back(op);
       }
@@ -426,6 +546,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,

   // The first argument is always the callee code.
   Node* callee = call->InputAt(0);
+  bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
+  bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
   switch (buffer->descriptor->kind()) {
     case CallDescriptor::kCallCodeObject:
       buffer->instruction_args.push_back(
@@ -443,7 +565,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
     case CallDescriptor::kCallJSFunction:
       buffer->instruction_args.push_back(
           g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
-                        buffer->descriptor->GetInputType(0)));
+                        buffer->descriptor->GetInputType(0).representation()));
       break;
     case CallDescriptor::kLazyBailout:
       // The target is ignored, but we still need to pass a value here.
@@ -456,19 +578,26 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
   // follows (n is the number of value inputs to the frame state):
   // arg 1 : deoptimization id.
   // arg 2 - arg (n + 1) : value inputs to the frame state.
-  if (buffer->frame_state_descriptor != NULL) {
+  size_t frame_state_entries = 0;
+  USE(frame_state_entries);  // frame_state_entries is only used for debug.
+  if (buffer->frame_state_descriptor != nullptr) {
     InstructionSequence::StateId state_id =
         sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
     buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));

     Node* frame_state =
         call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
-    AddFrameStateInputs(frame_state, &g, &buffer->instruction_args,
-                        buffer->frame_state_descriptor,
-                        FrameStateInputKind::kStackSlot, instruction_zone());
+
+    StateObjectDeduplicator deduplicator(instruction_zone());
+
+    frame_state_entries =
+        1 + AddInputsToFrameStateDescriptor(
+                buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
+                &buffer->instruction_args, FrameStateInputKind::kStackSlot,
+                instruction_zone());
+
+    DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
   }
-  DCHECK(1 + buffer->frame_state_value_count() ==
-         buffer->instruction_args.size());

   size_t input_count = static_cast<size_t>(buffer->input_count());
@@ -478,27 +607,47 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
   // as an InstructionOperand argument to the call.
   auto iter(call->inputs().begin());
   size_t pushed_count = 0;
+  bool call_tail = (flags & kCallTail) != 0;
   for (size_t index = 0; index < input_count; ++iter, ++index) {
     DCHECK(iter != call->inputs().end());
     DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
     if (index == 0) continue;  // The first argument (callee) is already done.
+
+    LinkageLocation location = buffer->descriptor->GetInputLocation(index);
+    if (call_tail) {
+      location = LinkageLocation::ConvertToTailCallerLocation(
+          location, stack_param_delta);
+    }
     InstructionOperand op =
-        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
-                      buffer->descriptor->GetInputType(index));
-    if (UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
+        g.UseLocation(*iter, location,
+                      buffer->descriptor->GetInputType(index).representation());
+    if (UnallocatedOperand::cast(op).HasFixedSlotPolicy() && !call_tail) {
       int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
       if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
-        buffer->pushed_nodes.resize(stack_index + 1, NULL);
+        buffer->pushed_nodes.resize(stack_index + 1);
      }
-      DCHECK(!buffer->pushed_nodes[stack_index]);
-      buffer->pushed_nodes[stack_index] = *iter;
+      PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
+      buffer->pushed_nodes[stack_index] = parameter;
       pushed_count++;
     } else {
       buffer->instruction_args.push_back(op);
     }
   }
   DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
-                             buffer->frame_state_value_count());
+                             frame_state_entries);
+  if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
+      stack_param_delta != 0) {
+    // For tail calls that change the size of their parameter list and keep
+    // their return address on the stack, move the return address to just above
+    // the parameters.
+    LinkageLocation saved_return_location =
+        LinkageLocation::ForSavedCallerReturnAddress();
+    InstructionOperand return_address =
+        g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
+                                 saved_return_location, stack_param_delta),
+                             saved_return_location);
+    buffer->instruction_args.push_back(return_address);
+  }
 }
@@ -539,7 +688,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
   instruction_block->set_code_start(static_cast<int>(instructions_.size()));
   instruction_block->set_code_end(current_block_end);

-  current_block_ = NULL;
+  current_block_ = nullptr;
 }
@@ -585,7 +734,6 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
       DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
       // All other successors must be cases.
       sw.case_count = block->SuccessorCount() - 1;
-      DCHECK_LE(1u, sw.case_count);
       sw.case_branches = &block->successors().front();
       // Determine case values and their min/max.
       sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
@@ -611,12 +759,9 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
       return VisitReturn(input);
     }
     case BasicBlock::kDeoptimize: {
-      // If the result itself is a return, return its input.
-      Node* value =
-          (input != nullptr && input->opcode() == IrOpcode::kDeoptimize)
-              ? input->InputAt(0)
-              : input;
-      return VisitDeoptimize(value);
+      DeoptimizeKind kind = DeoptimizeKindOf(input->op());
+      Node* value = input->InputAt(0);
+      return VisitDeoptimize(kind, value);
     }
     case BasicBlock::kThrow:
       DCHECK_EQ(IrOpcode::kThrow, input->opcode());
@@ -661,14 +806,14 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kParameter: {
       MachineType type =
           linkage()->GetParameterType(ParameterIndexOf(node->op()));
-      MarkAsRepresentation(type, node);
+      MarkAsRepresentation(type.representation(), node);
       return VisitParameter(node);
     }
     case IrOpcode::kOsrValue:
       return MarkAsReference(node), VisitOsrValue(node);
     case IrOpcode::kPhi: {
-      MachineType type = OpParameter<MachineType>(node);
-      MarkAsRepresentation(type, node);
+      MachineRepresentation rep = PhiRepresentationOf(node->op());
+      MarkAsRepresentation(rep, node);
       return VisitPhi(node);
     }
     case IrOpcode::kProjection:
@@ -692,10 +837,11 @@ void InstructionSelector::VisitNode(Node* node) {
       return VisitCall(node);
     case IrOpcode::kFrameState:
     case IrOpcode::kStateValues:
+    case IrOpcode::kObjectState:
       return;
     case IrOpcode::kLoad: {
-      LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
-      MarkAsRepresentation(rep, node);
+      LoadRepresentation type = LoadRepresentationOf(node->op());
+      MarkAsRepresentation(type.representation(), node);
       return VisitLoad(node);
     }
     case IrOpcode::kStore:
@@ -776,8 +922,12 @@ void InstructionSelector::VisitNode(Node* node) {
      return VisitUint32MulHigh(node);
     case IrOpcode::kInt64Add:
       return MarkAsWord64(node), VisitInt64Add(node);
+    case IrOpcode::kInt64AddWithOverflow:
+      return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
     case IrOpcode::kInt64Sub:
       return MarkAsWord64(node), VisitInt64Sub(node);
+    case IrOpcode::kInt64SubWithOverflow:
+      return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
     case IrOpcode::kInt64Mul:
       return MarkAsWord64(node), VisitInt64Mul(node);
     case IrOpcode::kInt64Div:
@@ -806,6 +956,14 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
     case IrOpcode::kChangeFloat64ToUint32:
       return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+    case IrOpcode::kTryTruncateFloat32ToInt64:
+      return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
+    case IrOpcode::kTryTruncateFloat64ToInt64:
+      return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
+    case IrOpcode::kTryTruncateFloat32ToUint64:
+      return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
+    case IrOpcode::kTryTruncateFloat64ToUint64:
+      return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
     case IrOpcode::kChangeInt32ToInt64:
       return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
     case IrOpcode::kChangeUint32ToUint64:
@@ -822,6 +980,10 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
     case IrOpcode::kBitcastFloat32ToInt32:
       return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+    case IrOpcode::kRoundUint64ToFloat32:
+      return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
+    case IrOpcode::kRoundUint64ToFloat64:
+      return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
     case IrOpcode::kBitcastFloat64ToInt64:
       return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
     case IrOpcode::kBitcastInt32ToFloat32:
@@ -874,12 +1036,24 @@ void InstructionSelector::VisitNode(Node* node) {
       return VisitFloat64LessThan(node);
     case IrOpcode::kFloat64LessThanOrEqual:
       return VisitFloat64LessThanOrEqual(node);
+    case IrOpcode::kFloat32RoundDown:
+      return MarkAsFloat32(node), VisitFloat32RoundDown(node);
     case IrOpcode::kFloat64RoundDown:
       return MarkAsFloat64(node), VisitFloat64RoundDown(node);
+    case IrOpcode::kFloat32RoundUp:
+      return MarkAsFloat32(node), VisitFloat32RoundUp(node);
+    case IrOpcode::kFloat64RoundUp:
+      return MarkAsFloat64(node), VisitFloat64RoundUp(node);
+    case IrOpcode::kFloat32RoundTruncate:
+      return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
     case IrOpcode::kFloat64RoundTruncate:
       return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
     case IrOpcode::kFloat64RoundTiesAway:
       return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
+    case IrOpcode::kFloat32RoundTiesEven:
+      return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
+    case IrOpcode::kFloat64RoundTiesEven:
+      return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
     case IrOpcode::kFloat64ExtractLowWord32:
       return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
     case IrOpcode::kFloat64ExtractHighWord32:
@@ -893,7 +1067,8 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kLoadFramePointer:
       return VisitLoadFramePointer(node);
     case IrOpcode::kCheckedLoad: {
-      MachineType rep = OpParameter<MachineType>(node);
+      MachineRepresentation rep =
+          CheckedLoadRepresentationOf(node->op()).representation();
       MarkAsRepresentation(rep, node);
       return VisitCheckedLoad(node);
     }
@@ -994,9 +1169,19 @@ void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }


+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
 void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }


+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
 void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
@@ -1038,6 +1223,26 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
 }


+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   UNIMPLEMENTED();
 }
@@ -1053,6 +1258,16 @@ void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
 }


+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
   UNIMPLEMENTED();
 }
@@ -1082,9 +1297,16 @@ void InstructionSelector::VisitGuard(Node* node) {
 void InstructionSelector::VisitParameter(Node* node) {
   OperandGenerator g(this);
   int index = ParameterIndexOf(node->op());
-  Emit(kArchNop,
-       g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
-                          linkage()->GetParameterType(index)));
+  InstructionOperand op =
+      linkage()->ParameterHasSecondaryLocation(index)
+          ? g.DefineAsDualLocation(
+                node, linkage()->GetParameterLocation(index),
+                linkage()->GetParameterSecondaryLocation(index))
+          : g.DefineAsLocation(
+                node, linkage()->GetParameterLocation(index),
+                linkage()->GetParameterType(index).representation());
+
+  Emit(kArchNop, op);
 }
@@ -1093,8 +1315,9 @@ void InstructionSelector::VisitIfException(Node* node) {
   Node* call = node->InputAt(1);
   DCHECK_EQ(IrOpcode::kCall, call->opcode());
   const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(call);
-  Emit(kArchNop, g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
-                                    descriptor->GetReturnType(0)));
+  Emit(kArchNop,
+       g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
+                          descriptor->GetReturnType(0).representation()));
 }
@@ -1102,7 +1325,7 @@ void InstructionSelector::VisitOsrValue(Node* node) {
   OperandGenerator g(this);
   int index = OpParameter<int>(node);
   Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
-                                    kMachAnyTagged));
+                                    MachineRepresentation::kTagged));
 }
@@ -1128,6 +1351,12 @@ void InstructionSelector::VisitProjection(Node* node) {
   switch (value->opcode()) {
     case IrOpcode::kInt32AddWithOverflow:
     case IrOpcode::kInt32SubWithOverflow:
+    case IrOpcode::kInt64AddWithOverflow:
+    case IrOpcode::kInt64SubWithOverflow:
+    case IrOpcode::kTryTruncateFloat32ToInt64:
+    case IrOpcode::kTryTruncateFloat64ToInt64:
+    case IrOpcode::kTryTruncateFloat32ToUint64:
+    case IrOpcode::kTryTruncateFloat64ToUint64:
       if (ProjectionIndexOf(node->op()) == 0u) {
         Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
       } else {
@@ -1166,7 +1395,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
   // the code object in a register if there are multiple uses of it.
   // Improve constant pool and the heuristics in the register allocator
   // for where to emit constants.
-  InitializeCallBuffer(node, &buffer, true, true);
+  CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
+  InitializeCallBuffer(node, &buffer, call_buffer_flags);

   EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
@@ -1219,11 +1449,17 @@ void InstructionSelector::VisitTailCall(Node* node) {

   // TODO(turbofan): Relax restriction for stack parameters.

-  if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
+  int stack_param_delta = 0;
+  if (linkage()->GetIncomingDescriptor()->CanTailCall(node,
+                                                      &stack_param_delta)) {
     CallBuffer buffer(zone(), descriptor, nullptr);

     // Compute InstructionOperands for inputs and outputs.
-    InitializeCallBuffer(node, &buffer, true, IsTailCallAddressImmediate());
+    CallBufferFlags flags(kCallCodeImmediate | kCallTail);
+    if (IsTailCallAddressImmediate()) {
+      flags |= kCallAddressImmediate;
+    }
+    InitializeCallBuffer(node, &buffer, flags, stack_param_delta);

     // Select the appropriate opcode based on the call type.
     InstructionCode opcode;
@@ -1240,6 +1476,11 @@ void InstructionSelector::VisitTailCall(Node* node) {
     }
     opcode |= MiscField::encode(descriptor->flags());

+    buffer.instruction_args.push_back(g.TempImmediate(stack_param_delta));
+
+    Emit(kArchPrepareTailCall, g.NoOutput(),
+         g.TempImmediate(stack_param_delta));
+
     // Emit the tailcall instruction.
     Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
          &buffer.instruction_args.front());
@@ -1253,7 +1494,11 @@ void InstructionSelector::VisitTailCall(Node* node) {
     CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

     // Compute InstructionOperands for inputs and outputs.
-    InitializeCallBuffer(node, &buffer, true, IsTailCallAddressImmediate());
+    CallBufferFlags flags = kCallCodeImmediate;
+    if (IsTailCallAddressImmediate()) {
+      flags |= kCallAddressImmediate;
+    }
+    InitializeCallBuffer(node, &buffer, flags);

     EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
@@ -1300,38 +1545,47 @@ void InstructionSelector::VisitReturn(Node* ret) {
     for (int i = 0; i < ret_count; ++i) {
       value_locations[i] =
           g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i),
-                        linkage()->GetReturnType(i));
+                        linkage()->GetReturnType(i).representation());
     }
     Emit(kArchRet, 0, nullptr, ret_count, value_locations);
   }
 }


-void InstructionSelector::VisitDeoptimize(Node* value) {
+void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
   OperandGenerator g(this);

   FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
-  size_t arg_count = desc->GetTotalSize() + 1;  // Include deopt id.
   InstructionOperandVector args(instruction_zone());
-  args.reserve(arg_count);
+  args.reserve(desc->GetTotalSize() + 1);  // Include deopt id.

   InstructionSequence::StateId state_id =
       sequence()->AddFrameStateDescriptor(desc);
   args.push_back(g.TempImmediate(state_id.ToInt()));

-  AddFrameStateInputs(value, &g, &args, desc, FrameStateInputKind::kAny,
-                      instruction_zone());
+  StateObjectDeduplicator deduplicator(instruction_zone());

-  DCHECK_EQ(args.size(), arg_count);
+  AddInputsToFrameStateDescriptor(desc, value, &g, &deduplicator, &args,
+                                  FrameStateInputKind::kAny,
+                                  instruction_zone());

-  Emit(kArchDeoptimize, 0, nullptr, arg_count, &args.front(), 0, nullptr);
+  InstructionCode opcode = kArchDeoptimize;
+  switch (kind) {
+    case DeoptimizeKind::kEager:
+      opcode |= MiscField::encode(Deoptimizer::EAGER);
+      break;
+    case DeoptimizeKind::kSoft:
+      opcode |= MiscField::encode(Deoptimizer::SOFT);
+      break;
+  }
+  Emit(opcode, 0, nullptr, args.size(), &args.front(), 0, nullptr);
 }


 void InstructionSelector::VisitThrow(Node* value) {
   OperandGenerator g(this);
-  Emit(kArchNop, g.NoOutput());  // TODO(titzer)
+  Emit(kArchThrowTerminator, g.NoOutput());  // TODO(titzer)
 }
@@ -1351,7 +1605,7 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
   DCHECK_EQ(parameters, state_info.parameter_count());
   DCHECK_EQ(locals, state_info.local_count());

-  FrameStateDescriptor* outer_state = NULL;
+  FrameStateDescriptor* outer_state = nullptr;
   Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
   if (outer_node->opcode() == IrOpcode::kFrameState) {
     outer_state = GetFrameStateDescriptor(outer_node);