Diffstat (limited to 'deps/v8/test/cctest/compiler')
24 files changed, 1266 insertions, 642 deletions
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h index 83b3328a3b..8eaf6325f2 100644 --- a/deps/v8/test/cctest/compiler/c-signature.h +++ b/deps/v8/test/cctest/compiler/c-signature.h @@ -69,6 +69,10 @@ class CSignature : public MachineSignature { } } + static CSignature* FromMachine(Zone* zone, MachineSignature* msig) { + return reinterpret_cast<CSignature*>(msig); + } + static CSignature* New(Zone* zone, MachineType ret, MachineType p1 = kMachNone, MachineType p2 = kMachNone, MachineType p3 = kMachNone, MachineType p4 = kMachNone, diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h index dc265ea5fa..31a6d0f93b 100644 --- a/deps/v8/test/cctest/compiler/call-tester.h +++ b/deps/v8/test/cctest/compiler/call-tester.h @@ -304,6 +304,21 @@ class CallHelper { Isolate* isolate_; }; +// A call helper that calls the given code object assuming C calling convention. +template <typename T> +class CodeRunner : public CallHelper<T> { + public: + CodeRunner(Isolate* isolate, Handle<Code> code, CSignature* csig) + : CallHelper<T>(isolate, csig), code_(code) {} + virtual ~CodeRunner() {} + + virtual byte* Generate() { return code_->entry(); } + + private: + Handle<Code> code_; +}; + + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc index d05b282293..98957c7f01 100644 --- a/deps/v8/test/cctest/compiler/codegen-tester.cc +++ b/deps/v8/test/cctest/compiler/codegen-tester.cc @@ -368,8 +368,6 @@ void Int32BinopInputShapeTester::RunRight( } -#if V8_TURBOFAN_TARGET - TEST(ParametersEqual) { RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32); Node* p1 = m.Parameter(1); @@ -572,5 +570,3 @@ TEST(RunBinopTester) { FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(-11.25, *i)); } } } - -#endif // V8_TURBOFAN_TARGET diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h index bc6d938ce1..d8ecc02fc2 100644 --- a/deps/v8/test/cctest/compiler/codegen-tester.h +++ b/deps/v8/test/cctest/compiler/codegen-tester.h @@ -34,8 +34,10 @@ class RawMachineAssemblerTester : public HandleAndZoneScope, p2, p3, p4)), RawMachineAssembler( main_isolate(), new (main_zone()) Graph(main_zone()), - CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0, p1, - p2, p3, p4), + Linkage::GetSimplifiedCDescriptor( + main_zone(), + CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0, + p1, p2, p3, p4)), kMachPtr, InstructionSelector::SupportedMachineOperatorFlags()) {} void CheckNumber(double expected, Object* number) { diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h index 54c62ab634..56ab514c65 100644 --- a/deps/v8/test/cctest/compiler/function-tester.h +++ b/deps/v8/test/cctest/compiler/function-tester.h @@ -13,15 +13,13 @@ #include "src/compiler/linkage.h" #include "src/compiler/pipeline.h" #include "src/execution.h" -#include "src/full-codegen.h" +#include "src/full-codegen/full-codegen.h" #include "src/handles.h" #include "src/objects-inl.h" #include "src/parser.h" #include "src/rewriter.h" #include "src/scopes.h" -#define USE_CRANKSHAFT 0 - namespace v8 { namespace internal { namespace compiler { @@ -156,7 +154,6 @@ class FunctionTester : public InitializedHandleScope { Handle<JSFunction> Compile(Handle<JSFunction> function) { // TODO(titzer): make this method private. 
-#if V8_TURBOFAN_TARGET Zone zone; ParseInfo parse_info(&zone, function); CompilationInfo info(&parse_info); @@ -181,19 +178,6 @@ class FunctionTester : public InitializedHandleScope { CHECK(!code.is_null()); info.context()->native_context()->AddOptimizedCode(*code); function->ReplaceCode(*code); -#elif USE_CRANKSHAFT - Handle<Code> unoptimized = Handle<Code>(function->code()); - Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized, - Compiler::NOT_CONCURRENT); - CHECK(!code.is_null()); -#if ENABLE_DISASSEMBLER - if (FLAG_print_opt_code) { - CodeTracer::Scope tracing_scope(isolate->GetCodeTracer()); - code->Disassemble("test code", tracing_scope.file()); - } -#endif - function->ReplaceCode(*code); -#endif return function; } @@ -212,7 +196,6 @@ class FunctionTester : public InitializedHandleScope { // Compile the given machine graph instead of the source of the function // and replace the JSFunction's code with the result. Handle<JSFunction> CompileGraph(Graph* graph) { - CHECK(Pipeline::SupportedTarget()); Zone zone; ParseInfo parse_info(&zone, function); CompilationInfo info(&parse_info); diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h index 7270293e0f..41c1e384be 100644 --- a/deps/v8/test/cctest/compiler/graph-builder-tester.h +++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h @@ -9,13 +9,12 @@ #include "test/cctest/cctest.h" #include "src/compiler/common-operator.h" -#include "src/compiler/graph-builder.h" #include "src/compiler/linkage.h" #include "src/compiler/machine-operator.h" +#include "src/compiler/operator-properties.h" #include "src/compiler/pipeline.h" #include "src/compiler/simplified-operator.h" #include "test/cctest/compiler/call-tester.h" -#include "test/cctest/compiler/simplified-graph-builder.h" namespace v8 { namespace internal { @@ -29,6 +28,12 @@ class GraphAndBuilders { main_machine_(zone), main_simplified_(zone) {} + Graph* graph() const { return main_graph_; } + Zone* zone() const { return graph()->zone(); } + CommonOperatorBuilder* common() { return &main_common_; } + MachineOperatorBuilder* machine() { return &main_machine_; } + SimplifiedOperatorBuilder* simplified() { return &main_simplified_; } + protected: // Prefixed with main_ to avoid naming conflicts. Graph* main_graph_; @@ -40,9 +45,8 @@ class GraphAndBuilders { template <typename ReturnType> class GraphBuilderTester : public HandleAndZoneScope, - private GraphAndBuilders, - public CallHelper<ReturnType>, - public SimplifiedGraphBuilder { + public GraphAndBuilders, + public CallHelper<ReturnType> { public: explicit GraphBuilderTester(MachineType p0 = kMachNone, MachineType p1 = kMachNone, @@ -54,8 +58,8 @@ class GraphBuilderTester : public HandleAndZoneScope, main_isolate(), CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0, p1, p2, p3, p4)), - SimplifiedGraphBuilder(main_isolate(), main_graph_, &main_common_, - &main_machine_, &main_simplified_), + effect_(NULL), + return_(NULL), parameters_(main_zone()->template NewArray<Node*>(parameter_count())) { Begin(static_cast<int>(parameter_count())); InitParameters(); @@ -68,16 +72,214 @@ class GraphBuilderTester : public HandleAndZoneScope, return parameters_[index]; } - Factory* factory() const { return isolate()->factory(); } + Isolate* isolate() { return main_isolate(); } + Factory* factory() { return isolate()->factory(); } + + // Initialize graph and builder. 
+ void Begin(int num_parameters) { + DCHECK(graph()->start() == NULL); + Node* start = graph()->NewNode(common()->Start(num_parameters + 3)); + graph()->SetStart(start); + effect_ = start; + } + + void Return(Node* value) { + return_ = + graph()->NewNode(common()->Return(), value, effect_, graph()->start()); + effect_ = NULL; + } + + // Close the graph. + void End() { + Node* end = graph()->NewNode(common()->End(1), return_); + graph()->SetEnd(end); + } + + Node* PointerConstant(void* value) { + intptr_t intptr_value = reinterpret_cast<intptr_t>(value); + return kPointerSize == 8 ? NewNode(common()->Int64Constant(intptr_value)) + : Int32Constant(static_cast<int>(intptr_value)); + } + Node* Int32Constant(int32_t value) { + return NewNode(common()->Int32Constant(value)); + } + Node* HeapConstant(Handle<HeapObject> object) { + Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object); + return NewNode(common()->HeapConstant(val)); + } + + Node* BooleanNot(Node* a) { return NewNode(simplified()->BooleanNot(), a); } + + Node* NumberEqual(Node* a, Node* b) { + return NewNode(simplified()->NumberEqual(), a, b); + } + Node* NumberLessThan(Node* a, Node* b) { + return NewNode(simplified()->NumberLessThan(), a, b); + } + Node* NumberLessThanOrEqual(Node* a, Node* b) { + return NewNode(simplified()->NumberLessThanOrEqual(), a, b); + } + Node* NumberAdd(Node* a, Node* b) { + return NewNode(simplified()->NumberAdd(), a, b); + } + Node* NumberSubtract(Node* a, Node* b) { + return NewNode(simplified()->NumberSubtract(), a, b); + } + Node* NumberMultiply(Node* a, Node* b) { + return NewNode(simplified()->NumberMultiply(), a, b); + } + Node* NumberDivide(Node* a, Node* b) { + return NewNode(simplified()->NumberDivide(), a, b); + } + Node* NumberModulus(Node* a, Node* b) { + return NewNode(simplified()->NumberModulus(), a, b); + } + Node* NumberToInt32(Node* a) { + return NewNode(simplified()->NumberToInt32(), a); + } + Node* NumberToUint32(Node* a) { + return NewNode(simplified()->NumberToUint32(), a); + } + + Node* StringEqual(Node* a, Node* b) { + return NewNode(simplified()->StringEqual(), a, b); + } + Node* StringLessThan(Node* a, Node* b) { + return NewNode(simplified()->StringLessThan(), a, b); + } + Node* StringLessThanOrEqual(Node* a, Node* b) { + return NewNode(simplified()->StringLessThanOrEqual(), a, b); + } + + Node* ChangeTaggedToInt32(Node* a) { + return NewNode(simplified()->ChangeTaggedToInt32(), a); + } + Node* ChangeTaggedToUint32(Node* a) { + return NewNode(simplified()->ChangeTaggedToUint32(), a); + } + Node* ChangeTaggedToFloat64(Node* a) { + return NewNode(simplified()->ChangeTaggedToFloat64(), a); + } + Node* ChangeInt32ToTagged(Node* a) { + return NewNode(simplified()->ChangeInt32ToTagged(), a); + } + Node* ChangeUint32ToTagged(Node* a) { + return NewNode(simplified()->ChangeUint32ToTagged(), a); + } + Node* ChangeFloat64ToTagged(Node* a) { + return NewNode(simplified()->ChangeFloat64ToTagged(), a); + } + Node* ChangeBoolToBit(Node* a) { + return NewNode(simplified()->ChangeBoolToBit(), a); + } + Node* ChangeBitToBool(Node* a) { + return NewNode(simplified()->ChangeBitToBool(), a); + } + + Node* LoadField(const FieldAccess& access, Node* object) { + return NewNode(simplified()->LoadField(access), object); + } + Node* StoreField(const FieldAccess& access, Node* object, Node* value) { + return NewNode(simplified()->StoreField(access), object, value); + } + Node* LoadElement(const ElementAccess& access, Node* object, Node* index) { + return 
NewNode(simplified()->LoadElement(access), object, index); + } + Node* StoreElement(const ElementAccess& access, Node* object, Node* index, + Node* value) { + return NewNode(simplified()->StoreElement(access), object, index, value); + } + + Node* NewNode(const Operator* op) { + return MakeNode(op, 0, static_cast<Node**>(NULL)); + } + + Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); } + + Node* NewNode(const Operator* op, Node* n1, Node* n2) { + Node* buffer[] = {n1, n2}; + return MakeNode(op, arraysize(buffer), buffer); + } + + Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) { + Node* buffer[] = {n1, n2, n3}; + return MakeNode(op, arraysize(buffer), buffer); + } + + Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) { + Node* buffer[] = {n1, n2, n3, n4}; + return MakeNode(op, arraysize(buffer), buffer); + } + + Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, + Node* n5) { + Node* buffer[] = {n1, n2, n3, n4, n5}; + return MakeNode(op, arraysize(buffer), buffer); + } + + Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, + Node* n5, Node* n6) { + Node* nodes[] = {n1, n2, n3, n4, n5, n6}; + return MakeNode(op, arraysize(nodes), nodes); + } + + Node* NewNode(const Operator* op, int value_input_count, + Node** value_inputs) { + return MakeNode(op, value_input_count, value_inputs); + } protected: + Node* MakeNode(const Operator* op, int value_input_count, + Node** value_inputs) { + DCHECK(op->ValueInputCount() == value_input_count); + + DCHECK(!OperatorProperties::HasContextInput(op)); + DCHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op)); + bool has_control = op->ControlInputCount() == 1; + bool has_effect = op->EffectInputCount() == 1; + + DCHECK(op->ControlInputCount() < 2); + DCHECK(op->EffectInputCount() < 2); + + Node* result = NULL; + if (!has_control && !has_effect) { + result = graph()->NewNode(op, value_input_count, value_inputs); + } else { + int input_count_with_deps = value_input_count; + if (has_control) ++input_count_with_deps; + if (has_effect) ++input_count_with_deps; + Node** buffer = zone()->template NewArray<Node*>(input_count_with_deps); + memcpy(buffer, value_inputs, kPointerSize * value_input_count); + Node** current_input = buffer + value_input_count; + if (has_effect) { + *current_input++ = effect_; + } + if (has_control) { + *current_input++ = graph()->start(); + } + result = graph()->NewNode(op, input_count_with_deps, buffer); + if (has_effect) { + effect_ = result; + } + // This graph builder does not support control flow. 
+ CHECK_EQ(0, op->ControlOutputCount()); + } + + return result; + } + virtual byte* Generate() { - if (!Pipeline::SupportedBackend()) return NULL; if (code_.is_null()) { Zone* zone = graph()->zone(); CallDescriptor* desc = Linkage::GetSimplifiedCDescriptor(zone, this->csig_); code_ = Pipeline::GenerateCodeForTesting(main_isolate(), desc, graph()); +#ifdef ENABLE_DISASSEMBLER + if (!code_.is_null() && FLAG_print_opt_code) { + OFStream os(stdout); + code_.ToHandleChecked()->Disassemble("test code", os); + } +#endif } return code_.ToHandleChecked()->entry(); } @@ -92,6 +294,8 @@ class GraphBuilderTester : public HandleAndZoneScope, size_t parameter_count() const { return this->csig_->parameter_count(); } private: + Node* effect_; + Node* return_; Node** parameters_; MaybeHandle<Code> code_; }; diff --git a/deps/v8/test/cctest/compiler/instruction-selector-tester.h b/deps/v8/test/cctest/compiler/instruction-selector-tester.h deleted file mode 100644 index 3a28b2e5df..0000000000 --- a/deps/v8/test/cctest/compiler/instruction-selector-tester.h +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_ -#define V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_ - -#include <deque> -#include <set> - -#include "src/compiler/instruction-selector.h" -#include "src/compiler/raw-machine-assembler.h" -#include "src/ostreams.h" -#include "test/cctest/cctest.h" - -namespace v8 { -namespace internal { -namespace compiler { - -typedef std::set<int> VirtualRegisterSet; - -enum InstructionSelectorTesterMode { kTargetMode, kInternalMode }; - -class InstructionSelectorTester : public HandleAndZoneScope, - public RawMachineAssembler { - public: - enum Mode { kTargetMode, kInternalMode }; - - static const int kParameterCount = 3; - static MachineType* BuildParameterArray(Zone* zone) { - MachineType* array = zone->NewArray<MachineType>(kParameterCount); - for (int i = 0; i < kParameterCount; ++i) { - array[i] = kMachInt32; - } - return array; - } - - InstructionSelectorTester() - : RawMachineAssembler( - new (main_zone()) Graph(main_zone()), - new (main_zone()) MachineCallDescriptorBuilder( - kMachInt32, kParameterCount, BuildParameterArray(main_zone())), - kMachPtr) {} - - void SelectInstructions(CpuFeature feature) { - SelectInstructions(InstructionSelector::Features(feature)); - } - - void SelectInstructions(CpuFeature feature1, CpuFeature feature2) { - SelectInstructions(InstructionSelector::Features(feature1, feature2)); - } - - void SelectInstructions(Mode mode = kTargetMode) { - SelectInstructions(InstructionSelector::Features(), mode); - } - - void SelectInstructions(InstructionSelector::Features features, - Mode mode = kTargetMode) { - OFStream out(stdout); - Schedule* schedule = Export(); - CHECK_NE(0, graph()->NodeCount()); - CompilationInfo info(main_isolate(), main_zone()); - Linkage linkage(&info, call_descriptor()); - InstructionSequence sequence(&linkage, graph(), schedule); - SourcePositionTable source_positions(graph()); - InstructionSelector selector(&sequence, &source_positions, features); - selector.SelectInstructions(); - out << "--- Code sequence after instruction selection --- " << endl - << sequence; - for (InstructionSequence::const_iterator i = sequence.begin(); - i != sequence.end(); ++i) { - Instruction* instr = *i; - if (instr->opcode() < 0) continue; - if (mode == kTargetMode) { - switch 
(ArchOpcodeField::decode(instr->opcode())) { -#define CASE(Name) \ - case k##Name: \ - break; - TARGET_ARCH_OPCODE_LIST(CASE) -#undef CASE - default: - continue; - } - } - code.push_back(instr); - } - for (int vreg = 0; vreg < sequence.VirtualRegisterCount(); ++vreg) { - if (sequence.IsDouble(vreg)) { - CHECK(!sequence.IsReference(vreg)); - doubles.insert(vreg); - } - if (sequence.IsReference(vreg)) { - CHECK(!sequence.IsDouble(vreg)); - references.insert(vreg); - } - } - immediates.assign(sequence.immediates().begin(), - sequence.immediates().end()); - } - - int32_t ToInt32(const InstructionOperand* operand) const { - size_t i = operand->index(); - CHECK(i < immediates.size()); - CHECK_EQ(InstructionOperand::IMMEDIATE, operand->kind()); - return immediates[i].ToInt32(); - } - - std::deque<Instruction*> code; - VirtualRegisterSet doubles; - VirtualRegisterSet references; - std::deque<Constant> immediates; -}; - - -static inline void CheckSameVreg(InstructionOperand* exp, - InstructionOperand* val) { - CHECK_EQ(InstructionOperand::UNALLOCATED, exp->kind()); - CHECK_EQ(InstructionOperand::UNALLOCATED, val->kind()); - CHECK_EQ(UnallocatedOperand::cast(exp)->virtual_register(), - UnallocatedOperand::cast(val)->virtual_register()); -} - -} // namespace compiler -} // namespace internal -} // namespace v8 - -#endif // V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_ diff --git a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc b/deps/v8/test/cctest/compiler/simplified-graph-builder.cc deleted file mode 100644 index 4d57719eff..0000000000 --- a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "test/cctest/compiler/simplified-graph-builder.h" - -#include "src/compiler/operator-properties.h" - -namespace v8 { -namespace internal { -namespace compiler { - -SimplifiedGraphBuilder::SimplifiedGraphBuilder( - Isolate* isolate, Graph* graph, CommonOperatorBuilder* common, - MachineOperatorBuilder* machine, SimplifiedOperatorBuilder* simplified) - : GraphBuilder(isolate, graph), - effect_(NULL), - return_(NULL), - common_(common), - machine_(machine), - simplified_(simplified) {} - - -void SimplifiedGraphBuilder::Begin(int num_parameters) { - DCHECK(graph()->start() == NULL); - Node* start = graph()->NewNode(common()->Start(num_parameters + 3)); - graph()->SetStart(start); - effect_ = start; -} - - -void SimplifiedGraphBuilder::Return(Node* value) { - return_ = - graph()->NewNode(common()->Return(), value, effect_, graph()->start()); - effect_ = NULL; -} - - -void SimplifiedGraphBuilder::End() { - Node* end = graph()->NewNode(common()->End(1), return_); - graph()->SetEnd(end); -} - - -Node* SimplifiedGraphBuilder::MakeNode(const Operator* op, - int value_input_count, - Node** value_inputs, bool incomplete) { - DCHECK(op->ValueInputCount() == value_input_count); - - DCHECK(!OperatorProperties::HasContextInput(op)); - DCHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op)); - bool has_control = op->ControlInputCount() == 1; - bool has_effect = op->EffectInputCount() == 1; - - DCHECK(op->ControlInputCount() < 2); - DCHECK(op->EffectInputCount() < 2); - - Node* result = NULL; - if (!has_control && !has_effect) { - result = graph()->NewNode(op, value_input_count, value_inputs, incomplete); - } else { - int input_count_with_deps = value_input_count; - if (has_control) ++input_count_with_deps; - if (has_effect) ++input_count_with_deps; - Node** buffer = zone()->NewArray<Node*>(input_count_with_deps); - memcpy(buffer, value_inputs, kPointerSize * value_input_count); - Node** current_input = buffer + value_input_count; - if (has_effect) { - *current_input++ = effect_; - } - if (has_control) { - *current_input++ = graph()->start(); - } - result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete); - if (has_effect) { - effect_ = result; - } - // This graph builder does not support control flow. - CHECK_EQ(0, op->ControlOutputCount()); - } - - return result; -} - -} // namespace compiler -} // namespace internal -} // namespace v8 diff --git a/deps/v8/test/cctest/compiler/simplified-graph-builder.h b/deps/v8/test/cctest/compiler/simplified-graph-builder.h deleted file mode 100644 index 50c51d5ed8..0000000000 --- a/deps/v8/test/cctest/compiler/simplified-graph-builder.h +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_ -#define V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_ - -#include "src/compiler/common-operator.h" -#include "src/compiler/graph-builder.h" -#include "src/compiler/machine-operator.h" -#include "src/compiler/simplified-operator.h" -#include "test/cctest/cctest.h" -#include "test/cctest/compiler/call-tester.h" - -namespace v8 { -namespace internal { -namespace compiler { - -class SimplifiedGraphBuilder : public GraphBuilder { - public: - SimplifiedGraphBuilder(Isolate* isolate, Graph* graph, - CommonOperatorBuilder* common, - MachineOperatorBuilder* machine, - SimplifiedOperatorBuilder* simplified); - virtual ~SimplifiedGraphBuilder() {} - - Zone* zone() const { return graph()->zone(); } - CommonOperatorBuilder* common() const { return common_; } - MachineOperatorBuilder* machine() const { return machine_; } - SimplifiedOperatorBuilder* simplified() const { return simplified_; } - - // Initialize graph and builder. - void Begin(int num_parameters); - - void Return(Node* value); - - // Close the graph. - void End(); - - Node* PointerConstant(void* value) { - intptr_t intptr_value = reinterpret_cast<intptr_t>(value); - return kPointerSize == 8 ? NewNode(common()->Int64Constant(intptr_value)) - : Int32Constant(static_cast<int>(intptr_value)); - } - Node* Int32Constant(int32_t value) { - return NewNode(common()->Int32Constant(value)); - } - Node* HeapConstant(Handle<HeapObject> object) { - Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object); - return NewNode(common()->HeapConstant(val)); - } - - Node* BooleanNot(Node* a) { return NewNode(simplified()->BooleanNot(), a); } - - Node* NumberEqual(Node* a, Node* b) { - return NewNode(simplified()->NumberEqual(), a, b); - } - Node* NumberLessThan(Node* a, Node* b) { - return NewNode(simplified()->NumberLessThan(), a, b); - } - Node* NumberLessThanOrEqual(Node* a, Node* b) { - return NewNode(simplified()->NumberLessThanOrEqual(), a, b); - } - Node* NumberAdd(Node* a, Node* b) { - return NewNode(simplified()->NumberAdd(), a, b); - } - Node* NumberSubtract(Node* a, Node* b) { - return NewNode(simplified()->NumberSubtract(), a, b); - } - Node* NumberMultiply(Node* a, Node* b) { - return NewNode(simplified()->NumberMultiply(), a, b); - } - Node* NumberDivide(Node* a, Node* b) { - return NewNode(simplified()->NumberDivide(), a, b); - } - Node* NumberModulus(Node* a, Node* b) { - return NewNode(simplified()->NumberModulus(), a, b); - } - Node* NumberToInt32(Node* a) { - return NewNode(simplified()->NumberToInt32(), a); - } - Node* NumberToUint32(Node* a) { - return NewNode(simplified()->NumberToUint32(), a); - } - - Node* StringEqual(Node* a, Node* b) { - return NewNode(simplified()->StringEqual(), a, b); - } - Node* StringLessThan(Node* a, Node* b) { - return NewNode(simplified()->StringLessThan(), a, b); - } - Node* StringLessThanOrEqual(Node* a, Node* b) { - return NewNode(simplified()->StringLessThanOrEqual(), a, b); - } - - Node* ChangeTaggedToInt32(Node* a) { - return NewNode(simplified()->ChangeTaggedToInt32(), a); - } - Node* ChangeTaggedToUint32(Node* a) { - return NewNode(simplified()->ChangeTaggedToUint32(), a); - } - Node* ChangeTaggedToFloat64(Node* a) { - return NewNode(simplified()->ChangeTaggedToFloat64(), a); - } - Node* ChangeInt32ToTagged(Node* a) { - return NewNode(simplified()->ChangeInt32ToTagged(), a); - } - Node* ChangeUint32ToTagged(Node* a) { - return NewNode(simplified()->ChangeUint32ToTagged(), a); - } - Node* ChangeFloat64ToTagged(Node* a) { - 
return NewNode(simplified()->ChangeFloat64ToTagged(), a); - } - Node* ChangeBoolToBit(Node* a) { - return NewNode(simplified()->ChangeBoolToBit(), a); - } - Node* ChangeBitToBool(Node* a) { - return NewNode(simplified()->ChangeBitToBool(), a); - } - - Node* LoadField(const FieldAccess& access, Node* object) { - return NewNode(simplified()->LoadField(access), object); - } - Node* StoreField(const FieldAccess& access, Node* object, Node* value) { - return NewNode(simplified()->StoreField(access), object, value); - } - Node* LoadElement(const ElementAccess& access, Node* object, Node* index) { - return NewNode(simplified()->LoadElement(access), object, index); - } - Node* StoreElement(const ElementAccess& access, Node* object, Node* index, - Node* value) { - return NewNode(simplified()->StoreElement(access), object, index, value); - } - - protected: - virtual Node* MakeNode(const Operator* op, int value_input_count, - Node** value_inputs, bool incomplete) final; - - private: - Node* effect_; - Node* return_; - CommonOperatorBuilder* common_; - MachineOperatorBuilder* machine_; - SimplifiedOperatorBuilder* simplified_; -}; - -} // namespace compiler -} // namespace internal -} // namespace v8 - -#endif // V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_ diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc index fa4da9a736..7d7690bad6 100644 --- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc +++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc @@ -8,8 +8,6 @@ #include "test/cctest/cctest.h" #include "test/cctest/compiler/codegen-tester.h" -#if V8_TURBOFAN_TARGET - using namespace v8::internal; using namespace v8::internal::compiler; @@ -110,5 +108,3 @@ TEST(ProfileLoop) { m.Expect(arraysize(expected), expected); } } - -#endif // V8_TURBOFAN_TARGET diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc index 58202a61b0..06d380a6a2 100644 --- a/deps/v8/test/cctest/compiler/test-branch-combine.cc +++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc @@ -8,8 +8,6 @@ #include "test/cctest/compiler/codegen-tester.h" #include "test/cctest/compiler/value-helper.h" -#if V8_TURBOFAN_TARGET - using namespace v8::internal; using namespace v8::internal::compiler; @@ -459,4 +457,3 @@ TEST(BranchCombineFloat64Compares) { } } } -#endif // V8_TURBOFAN_TARGET diff --git a/deps/v8/test/cctest/compiler/test-changes-lowering.cc b/deps/v8/test/cctest/compiler/test-changes-lowering.cc index 04b5b9176b..b6b48bdac4 100644 --- a/deps/v8/test/cctest/compiler/test-changes-lowering.cc +++ b/deps/v8/test/cctest/compiler/test-changes-lowering.cc @@ -147,7 +147,6 @@ TEST(RunChangeTaggedToInt32) { ChangesLoweringTester<int32_t> t(kMachAnyTagged); t.BuildAndLower(t.simplified()->ChangeTaggedToInt32()); - if (Pipeline::SupportedTarget()) { FOR_INT32_INPUTS(i) { int32_t input = *i; @@ -167,7 +166,6 @@ TEST(RunChangeTaggedToInt32) { int32_t result = t.Call(*number); CHECK_EQ(input, result); } - } } } @@ -177,7 +175,6 @@ TEST(RunChangeTaggedToUint32) { ChangesLoweringTester<uint32_t> t(kMachAnyTagged); t.BuildAndLower(t.simplified()->ChangeTaggedToUint32()); - if (Pipeline::SupportedTarget()) { FOR_UINT32_INPUTS(i) { uint32_t input = *i; @@ -198,7 +195,6 @@ TEST(RunChangeTaggedToUint32) { CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result)); } } - } } @@ -211,7 +207,7 @@ TEST(RunChangeTaggedToFloat64) { 
t.machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)), &result); - if (Pipeline::SupportedTarget()) { + { FOR_INT32_INPUTS(i) { int32_t input = *i; @@ -234,7 +230,7 @@ TEST(RunChangeTaggedToFloat64) { } } - if (Pipeline::SupportedTarget()) { + { FOR_FLOAT64_INPUTS(i) { double input = *i; { @@ -257,13 +253,13 @@ TEST(RunChangeBoolToBit) { ChangesLoweringTester<int32_t> t(kMachAnyTagged); t.BuildAndLower(t.simplified()->ChangeBoolToBit()); - if (Pipeline::SupportedTarget()) { + { Object* true_obj = t.heap()->true_value(); int32_t result = t.Call(true_obj); CHECK_EQ(1, result); } - if (Pipeline::SupportedTarget()) { + { Object* false_obj = t.heap()->false_value(); int32_t result = t.Call(false_obj); CHECK_EQ(0, result); @@ -275,122 +271,15 @@ TEST(RunChangeBitToBool) { ChangesLoweringTester<Object*> t(kMachInt32); t.BuildAndLower(t.simplified()->ChangeBitToBool()); - if (Pipeline::SupportedTarget()) { + { Object* result = t.Call(1); Object* true_obj = t.heap()->true_value(); CHECK_EQ(true_obj, result); } - if (Pipeline::SupportedTarget()) { + { Object* result = t.Call(0); Object* false_obj = t.heap()->false_value(); CHECK_EQ(false_obj, result); } } - - -#if V8_TURBOFAN_BACKEND -// TODO(titzer): disabled on ARM - -TEST(RunChangeInt32ToTaggedSmi) { - ChangesLoweringTester<Object*> t; - int32_t input; - t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(), - t.machine()->Load(kMachInt32), &input); - - if (Pipeline::SupportedTarget()) { - FOR_INT32_INPUTS(i) { - input = *i; - if (!Smi::IsValid(input)) continue; - Object* result = t.Call(); - t.CheckNumber(static_cast<double>(input), result); - } - } -} - - -TEST(RunChangeUint32ToTaggedSmi) { - ChangesLoweringTester<Object*> t; - uint32_t input; - t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(), - t.machine()->Load(kMachUint32), &input); - - if (Pipeline::SupportedTarget()) { - FOR_UINT32_INPUTS(i) { - input = *i; - if (input > static_cast<uint32_t>(Smi::kMaxValue)) continue; - Object* result = t.Call(); - double expected = static_cast<double>(input); - t.CheckNumber(expected, result); - } - } -} - - -TEST(RunChangeInt32ToTagged) { - ChangesLoweringTester<Object*> t; - int32_t input; - t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(), - t.machine()->Load(kMachInt32), &input); - - if (Pipeline::SupportedTarget()) { - for (int m = 0; m < 3; m++) { // Try 3 GC modes. - FOR_INT32_INPUTS(i) { - if (m == 0) CcTest::heap()->EnableInlineAllocation(); - if (m == 1) CcTest::heap()->DisableInlineAllocation(); - if (m == 2) SimulateFullSpace(CcTest::heap()->new_space()); - - input = *i; - Object* result = t.CallWithPotentialGC<Object>(); - t.CheckNumber(static_cast<double>(input), result); - } - } - } -} - - -TEST(RunChangeUint32ToTagged) { - ChangesLoweringTester<Object*> t; - uint32_t input; - t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(), - t.machine()->Load(kMachUint32), &input); - - if (Pipeline::SupportedTarget()) { - for (int m = 0; m < 3; m++) { // Try 3 GC modes. 
- FOR_UINT32_INPUTS(i) { - if (m == 0) CcTest::heap()->EnableInlineAllocation(); - if (m == 1) CcTest::heap()->DisableInlineAllocation(); - if (m == 2) SimulateFullSpace(CcTest::heap()->new_space()); - - input = *i; - Object* result = t.CallWithPotentialGC<Object>(); - double expected = static_cast<double>(input); - t.CheckNumber(expected, result); - } - } - } -} - - -TEST(RunChangeFloat64ToTagged) { - ChangesLoweringTester<Object*> t; - double input; - t.BuildLoadAndLower(t.simplified()->ChangeFloat64ToTagged(), - t.machine()->Load(kMachFloat64), &input); - - if (Pipeline::SupportedTarget()) { - for (int m = 0; m < 3; m++) { // Try 3 GC modes. - FOR_FLOAT64_INPUTS(i) { - if (m == 0) CcTest::heap()->EnableInlineAllocation(); - if (m == 1) CcTest::heap()->DisableInlineAllocation(); - if (m == 2) SimulateFullSpace(CcTest::heap()->new_space()); - - input = *i; - Object* result = t.CallWithPotentialGC<Object>(); - t.CheckNumber(input, result); - } - } - } -} - -#endif // V8_TURBOFAN_BACKEND diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc index 252c43133e..29da5890ea 100644 --- a/deps/v8/test/cctest/compiler/test-linkage.cc +++ b/deps/v8/test/cctest/compiler/test-linkage.cc @@ -19,8 +19,6 @@ #include "src/compiler/schedule.h" #include "test/cctest/cctest.h" -#if V8_TURBOFAN_TARGET - using namespace v8::internal; using namespace v8::internal::compiler; @@ -80,7 +78,7 @@ TEST(TestLinkageCodeStubIncoming) { CompilationInfo info(&stub, isolate, &zone); CallDescriptor* descriptor = Linkage::ComputeIncoming(&zone, &info); CHECK(descriptor); - CHECK_EQ(1, static_cast<int>(descriptor->JSParameterCount())); + CHECK_EQ(0, static_cast<int>(descriptor->StackParameterCount())); CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount())); CHECK_EQ(Operator::kNoProperties, descriptor->properties()); CHECK_EQ(false, descriptor->IsJSFunctionCall()); @@ -113,5 +111,3 @@ TEST(TestLinkageRuntimeCall) { TEST(TestLinkageStubCall) { // TODO(titzer): test linkage creation for outgoing stub calls. 
} - -#endif // V8_TURBOFAN_TARGET diff --git a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc index ad05273995..b59f181f5e 100644 --- a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc +++ b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc @@ -36,7 +36,7 @@ struct TestHelper : public HandleAndZoneScope { CHECK(Rewriter::Rewrite(&parse_info)); CHECK(Scope::Analyze(&parse_info)); - Scope* scope = info.function()->scope(); + Scope* scope = info.literal()->scope(); AstValueFactory* factory = parse_info.ast_value_factory(); CHECK(scope); diff --git a/deps/v8/test/cctest/compiler/test-operator.cc b/deps/v8/test/cctest/compiler/test-operator.cc index e635da797d..0ac33637da 100644 --- a/deps/v8/test/cctest/compiler/test-operator.cc +++ b/deps/v8/test/cctest/compiler/test-operator.cc @@ -69,10 +69,10 @@ TEST(TestOperator_Equals) { } -static SmartArrayPointer<const char> OperatorToString(Operator* op) { +static v8::base::SmartArrayPointer<const char> OperatorToString(Operator* op) { std::ostringstream os; os << *op; - return SmartArrayPointer<const char>(StrDup(os.str().c_str())); + return v8::base::SmartArrayPointer<const char>(StrDup(os.str().c_str())); } diff --git a/deps/v8/test/cctest/compiler/test-pipeline.cc b/deps/v8/test/cctest/compiler/test-pipeline.cc index 84550d502a..8996718644 100644 --- a/deps/v8/test/cctest/compiler/test-pipeline.cc +++ b/deps/v8/test/cctest/compiler/test-pipeline.cc @@ -21,13 +21,8 @@ static void RunPipeline(Zone* zone, const char* source) { CompilationInfo info(&parse_info); Pipeline pipeline(&info); -#if V8_TURBOFAN_TARGET Handle<Code> code = pipeline.GenerateCode(); - CHECK(Pipeline::SupportedTarget()); CHECK(!code.is_null()); -#else - USE(pipeline); -#endif } diff --git a/deps/v8/test/cctest/compiler/test-run-deopt.cc b/deps/v8/test/cctest/compiler/test-run-deopt.cc index d895924324..aedf668f44 100644 --- a/deps/v8/test/cctest/compiler/test-run-deopt.cc +++ b/deps/v8/test/cctest/compiler/test-run-deopt.cc @@ -4,14 +4,13 @@ #include "src/v8.h" +#include "src/frames-inl.h" #include "test/cctest/cctest.h" #include "test/cctest/compiler/function-tester.h" using namespace v8::internal; using namespace v8::internal::compiler; -#if V8_TURBOFAN_TARGET - static void IsOptimized(const v8::FunctionCallbackInfo<v8::Value>& args) { JavaScriptFrameIterator it(CcTest::i_isolate()); JavaScriptFrame* frame = it.frame(); @@ -103,7 +102,6 @@ TEST(DeoptExceptionHandlerFinally) { #endif } -#endif TEST(DeoptTrivial) { FLAG_allow_natives_syntax = true; diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc index 7f8ae25619..1b2559fc5f 100644 --- a/deps/v8/test/cctest/compiler/test-run-inlining.cc +++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc @@ -4,10 +4,9 @@ #include "src/v8.h" +#include "src/frames-inl.h" #include "test/cctest/compiler/function-tester.h" -#if V8_TURBOFAN_TARGET - using namespace v8::internal; using namespace v8::internal::compiler; @@ -574,5 +573,3 @@ TEST(InlineMutuallyRecursive) { InstallAssertInlineCountHelper(CcTest::isolate()); T.CheckCall(T.Val(42), T.Val(1)); } - -#endif // V8_TURBOFAN_TARGET diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc index 8de2d7a214..893c2fa460 100644 --- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc +++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc @@ -132,20 +132,6 @@ TEST(ConstructorCall) 
{ // TODO(titzer): factor these out into test-runtime-calls.cc -TEST(RuntimeCallCPP1) { - FLAG_allow_natives_syntax = true; - FunctionTester T("(function(a) { return %ToBool(a); })"); - - T.CheckCall(T.true_value(), T.Val(23), T.undefined()); - T.CheckCall(T.true_value(), T.Val(4.2), T.undefined()); - T.CheckCall(T.true_value(), T.Val("str"), T.undefined()); - T.CheckCall(T.true_value(), T.true_value(), T.undefined()); - T.CheckCall(T.false_value(), T.false_value(), T.undefined()); - T.CheckCall(T.false_value(), T.undefined(), T.undefined()); - T.CheckCall(T.false_value(), T.Val(0.0), T.undefined()); -} - - TEST(RuntimeCallCPP2) { FLAG_allow_natives_syntax = true; FunctionTester T("(function(a,b) { return %NumberAdd(a, b); })"); diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc index b1fc36968f..8b14dab46c 100644 --- a/deps/v8/test/cctest/compiler/test-run-machops.cc +++ b/deps/v8/test/cctest/compiler/test-run-machops.cc @@ -13,8 +13,6 @@ #include "test/cctest/compiler/codegen-tester.h" #include "test/cctest/compiler/value-helper.h" -#if V8_TURBOFAN_TARGET - using namespace v8::base; using namespace v8::internal; using namespace v8::internal::compiler; @@ -82,7 +80,14 @@ TEST(CodeGenInt32Binop) { } -#if V8_TURBOFAN_BACKEND_64 +TEST(CodeGenNop) { + RawMachineAssemblerTester<void> m; + m.Return(m.Int32Constant(0)); + m.GenerateCode(); +} + + +#if V8_TARGET_ARCH_64_BIT static Node* Int64Input(RawMachineAssemblerTester<int64_t>* m, int index) { switch (index) { case 0: @@ -136,7 +141,7 @@ TEST(CodeGenInt64Binop) { // TODO(titzer): add tests that run 64-bit integer operations. -#endif // V8_TURBOFAN_BACKEND_64 +#endif // V8_TARGET_ARCH_64_BIT TEST(RunGoto) { @@ -5274,5 +5279,3 @@ TEST(RunCallCFunction8) { } #endif // USE_SIMULATOR - -#endif // V8_TURBOFAN_TARGET diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc new file mode 100644 index 0000000000..2e255c7729 --- /dev/null +++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc @@ -0,0 +1,985 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/assembler.h" +#include "src/codegen.h" +#include "src/compiler/linkage.h" +#include "src/compiler/machine-type.h" +#include "src/compiler/raw-machine-assembler.h" + +#include "test/cctest/cctest.h" +#include "test/cctest/compiler/codegen-tester.h" +#include "test/cctest/compiler/graph-builder-tester.h" +#include "test/cctest/compiler/value-helper.h" + +using namespace v8::base; +using namespace v8::internal; +using namespace v8::internal::compiler; + +typedef RawMachineAssembler::Label MLabel; + +#if V8_TARGET_ARCH_ARM64 +// TODO(titzer): fix native stack parameters on arm64 +#define DISABLE_NATIVE_STACK_PARAMS true +#else +#define DISABLE_NATIVE_STACK_PARAMS false +#endif + +namespace { +typedef float float32; +typedef double float64; + +// Picks a representative pair of integers from the given range. +// If there are less than {max_pairs} possible pairs, do them all, otherwise try +// to select a representative set. +class Pairs { + public: + Pairs(int max_pairs, int range) + : range_(range), + max_pairs_(std::min(max_pairs, range_ * range_)), + counter_(0) {} + + bool More() { return counter_ < max_pairs_; } + + void Next(int* r0, int* r1, bool same_is_ok) { + do { + // Find the next pair. 
+ if (exhaustive()) { + *r0 = counter_ % range_; + *r1 = counter_ / range_; + } else { + // Try each integer at least once for both r0 and r1. + int index = counter_ / 2; + if (counter_ & 1) { + *r0 = index % range_; + *r1 = index / range_; + } else { + *r1 = index % range_; + *r0 = index / range_; + } + } + counter_++; + if (same_is_ok) break; + if (*r0 == *r1) { + if (counter_ >= max_pairs_) { + // For the last hurrah, reg#0 with reg#n-1 + *r0 = 0; + *r1 = range_ - 1; + break; + } + } + } while (true); + + DCHECK(*r0 >= 0 && *r0 < range_); + DCHECK(*r1 >= 0 && *r1 < range_); + } + + private: + int range_; + int max_pairs_; + int counter_; + bool exhaustive() { return max_pairs_ == (range_ * range_); } +}; + + +// Pairs of general purpose registers. +class RegisterPairs : public Pairs { + public: + RegisterPairs() : Pairs(100, Register::kMaxNumAllocatableRegisters) {} +}; + + +// Pairs of double registers. +class Float32RegisterPairs : public Pairs { + public: + Float32RegisterPairs() + : Pairs(100, DoubleRegister::NumAllocatableAliasedRegisters()) {} +}; + + +// Pairs of double registers. +class Float64RegisterPairs : public Pairs { + public: + Float64RegisterPairs() + : Pairs(100, DoubleRegister::NumAllocatableAliasedRegisters()) {} +}; + + +// Helper for allocating either an GP or FP reg, or the next stack slot. +struct Allocator { + Allocator(int* gp, int gpc, int* fp, int fpc) + : gp_count(gpc), + gp_offset(0), + gp_regs(gp), + fp_count(fpc), + fp_offset(0), + fp_regs(fp), + stack_offset(0) {} + + int gp_count; + int gp_offset; + int* gp_regs; + + int fp_count; + int fp_offset; + int* fp_regs; + + int stack_offset; + + LinkageLocation Next(MachineType type) { + if (IsFloatingPoint(type)) { + // Allocate a floating point register/stack location. + if (fp_offset < fp_count) { + return LinkageLocation::ForRegister(fp_regs[fp_offset++]); + } else { + int offset = -1 - stack_offset; + stack_offset += StackWords(type); + return LinkageLocation::ForCallerFrameSlot(offset); + } + } else { + // Allocate a general purpose register/stack location. + if (gp_offset < gp_count) { + return LinkageLocation::ForRegister(gp_regs[gp_offset++]); + } else { + int offset = -1 - stack_offset; + stack_offset += StackWords(type); + return LinkageLocation::ForCallerFrameSlot(offset); + } + } + } + bool IsFloatingPoint(MachineType type) { + return RepresentationOf(type) == kRepFloat32 || + RepresentationOf(type) == kRepFloat64; + } + int StackWords(MachineType type) { + // TODO(titzer): hack. float32 occupies 8 bytes on stack. + int size = (RepresentationOf(type) == kRepFloat32 || + RepresentationOf(type) == kRepFloat64) + ? kDoubleSize + : ElementSizeOf(type); + return size <= kPointerSize ? 1 : size / kPointerSize; + } + void Reset() { + fp_offset = 0; + gp_offset = 0; + stack_offset = 0; + } +}; + + +class RegisterConfig { + public: + RegisterConfig(Allocator& p, Allocator& r) : params(p), rets(r) {} + + CallDescriptor* Create(Zone* zone, MachineSignature* msig) { + rets.Reset(); + params.Reset(); + + LocationSignature::Builder locations(zone, msig->return_count(), + msig->parameter_count()); + // Add return location(s). + const int return_count = static_cast<int>(locations.return_count_); + for (int i = 0; i < return_count; i++) { + locations.AddReturn(rets.Next(msig->GetReturn(i))); + } + + // Add register and/or stack parameter(s). 
+ const int parameter_count = static_cast<int>(msig->parameter_count()); + for (int i = 0; i < parameter_count; i++) { + locations.AddParam(params.Next(msig->GetParam(i))); + } + + const RegList kCalleeSaveRegisters = 0; + const RegList kCalleeSaveFPRegisters = 0; + + MachineType target_type = compiler::kMachAnyTagged; + LinkageLocation target_loc = LinkageLocation::ForAnyRegister(); + int stack_param_count = params.stack_offset; + return new (zone) CallDescriptor( // -- + CallDescriptor::kCallCodeObject, // kind + target_type, // target MachineType + target_loc, // target location + msig, // machine_sig + locations.Build(), // location_sig + stack_param_count, // stack_parameter_count + compiler::Operator::kNoProperties, // properties + kCalleeSaveRegisters, // callee-saved registers + kCalleeSaveFPRegisters, // callee-saved fp regs + CallDescriptor::kNoFlags, // flags + "c-call"); + } + + private: + Allocator& params; + Allocator& rets; +}; + +const int kMaxParamCount = 64; + +MachineType kIntTypes[kMaxParamCount + 1] = { + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, + kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32}; + + +// For making uniform int32 signatures shorter. +class Int32Signature : public MachineSignature { + public: + explicit Int32Signature(int param_count) + : MachineSignature(1, param_count, kIntTypes) { + CHECK(param_count <= kMaxParamCount); + } +}; + + +Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph, + Schedule* schedule = nullptr) { + Isolate* isolate = CcTest::InitIsolateOnce(); + Handle<Code> code = + Pipeline::GenerateCodeForTesting(isolate, desc, graph, schedule); + CHECK(!code.is_null()); +#ifdef ENABLE_DISASSEMBLER + if (FLAG_print_opt_code) { + OFStream os(stdout); + code->Disassemble(name, os); + } +#endif + return code; +} + + +Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) { + Zone zone; + MachineSignature* msig = + const_cast<MachineSignature*>(desc->GetMachineSignature()); + int param_count = static_cast<int>(msig->parameter_count()); + GraphAndBuilders caller(&zone); + { + GraphAndBuilders& b = caller; + Node* start = b.graph()->NewNode(b.common()->Start(param_count + 3)); + b.graph()->SetStart(start); + Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner); + Node* target = b.graph()->NewNode(b.common()->HeapConstant(unique)); + + // Add arguments to the call. + Node** args = zone.NewArray<Node*>(param_count + 3); + int index = 0; + args[index++] = target; + for (int i = 0; i < param_count; i++) { + args[index] = b.graph()->NewNode(b.common()->Parameter(i), start); + index++; + } + args[index++] = start; // effect. + args[index++] = start; // control. + + // Build the call and return nodes. 
+ Node* call = + b.graph()->NewNode(b.common()->Call(desc), param_count + 3, args); + Node* ret = b.graph()->NewNode(b.common()->Return(), call, call, start); + b.graph()->SetEnd(ret); + } + + CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, msig); + + return CompileGraph("wrapper", cdesc, caller.graph()); +} + + +template <typename CType> +class ArgsBuffer { + public: + static const int kMaxParamCount = 64; + + explicit ArgsBuffer(int count, int seed = 1) : count_(count), seed_(seed) { + // initialize the buffer with "seed 0" + seed_ = 0; + Mutate(); + seed_ = seed; + } + + class Sig : public MachineSignature { + public: + explicit Sig(int param_count) + : MachineSignature(1, param_count, MachTypes()) { + CHECK(param_count <= kMaxParamCount); + } + }; + + static MachineType* MachTypes() { + MachineType t = MachineTypeForC<CType>(); + static MachineType kTypes[kMaxParamCount + 1] = { + t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, + t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, + t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t}; + return kTypes; + } + + Node* MakeConstant(RawMachineAssembler& raw, int32_t value) { + return raw.Int32Constant(value); + } + + Node* MakeConstant(RawMachineAssembler& raw, int64_t value) { + return raw.Int64Constant(value); + } + + Node* MakeConstant(RawMachineAssembler& raw, float32 value) { + return raw.Float32Constant(value); + } + + Node* MakeConstant(RawMachineAssembler& raw, float64 value) { + return raw.Float64Constant(value); + } + + Node* LoadInput(RawMachineAssembler& raw, Node* base, int index) { + Node* offset = raw.Int32Constant(index * sizeof(CType)); + return raw.Load(MachineTypeForC<CType>(), base, offset); + } + + Node* StoreOutput(RawMachineAssembler& raw, Node* value) { + Node* base = raw.PointerConstant(&output); + Node* offset = raw.Int32Constant(0); + return raw.Store(MachineTypeForC<CType>(), base, offset, value); + } + + // Computes the next set of inputs by updating the {input} array. 
+ void Mutate(); + + void Reset() { memset(input, 0, sizeof(input)); } + + int count_; + int seed_; + CType input[kMaxParamCount]; + CType output; +}; + + +template <> +void ArgsBuffer<int32_t>::Mutate() { + uint32_t base = 1111111111u * seed_; + for (int j = 0; j < count_ && j < kMaxParamCount; j++) { + input[j] = static_cast<int32_t>(256 + base + j + seed_ * 13); + } + output = -1; + seed_++; +} + + +template <> +void ArgsBuffer<int64_t>::Mutate() { + uint64_t base = 11111111111111111ull * seed_; + for (int j = 0; j < count_ && j < kMaxParamCount; j++) { + input[j] = static_cast<int64_t>(256 + base + j + seed_ * 13); + } + output = -1; + seed_++; +} + + +template <> +void ArgsBuffer<float32>::Mutate() { + float64 base = -33.25 * seed_; + for (int j = 0; j < count_ && j < kMaxParamCount; j++) { + input[j] = 256 + base + j + seed_ * 13; + } + output = std::numeric_limits<float32>::quiet_NaN(); + seed_++; +} + + +template <> +void ArgsBuffer<float64>::Mutate() { + float64 base = -111.25 * seed_; + for (int j = 0; j < count_ && j < kMaxParamCount; j++) { + input[j] = 256 + base + j + seed_ * 13; + } + output = std::numeric_limits<float64>::quiet_NaN(); + seed_++; +} + + +int ParamCount(CallDescriptor* desc) { + return static_cast<int>(desc->GetMachineSignature()->parameter_count()); +} + + +template <typename CType> +class Computer { + public: + static void Run(CallDescriptor* desc, + void (*build)(CallDescriptor*, RawMachineAssembler&), + CType (*compute)(CallDescriptor*, CType* inputs), + int seed = 1) { + int num_params = ParamCount(desc); + CHECK_LE(num_params, kMaxParamCount); + Isolate* isolate = CcTest::InitIsolateOnce(); + HandleScope scope(isolate); + Handle<Code> inner = Handle<Code>::null(); + { + // Build the graph for the computation. + Zone zone; + Graph graph(&zone); + RawMachineAssembler raw(isolate, &graph, desc); + build(desc, raw); + inner = CompileGraph("Compute", desc, &graph, raw.Export()); + } + + CSignature0<int32_t> csig; + ArgsBuffer<CType> io(num_params, seed); + + { + // constant mode. + Handle<Code> wrapper = Handle<Code>::null(); + { + // Wrap the above code with a callable function that passes constants. + Zone zone; + Graph graph(&zone); + CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig); + RawMachineAssembler raw(isolate, &graph, cdesc); + Unique<HeapObject> unique = + Unique<HeapObject>::CreateUninitialized(inner); + Node* target = raw.HeapConstant(unique); + Node** args = zone.NewArray<Node*>(num_params); + for (int i = 0; i < num_params; i++) { + args[i] = io.MakeConstant(raw, io.input[i]); + } + + Node* call = raw.CallN(desc, target, args); + Node* store = io.StoreOutput(raw, call); + USE(store); + raw.Return(raw.Int32Constant(seed)); + wrapper = + CompileGraph("Compute-wrapper-const", cdesc, &graph, raw.Export()); + } + + CodeRunner<int32_t> runnable(isolate, wrapper, &csig); + + // Run the code, checking it against the reference. + CType expected = compute(desc, io.input); + int32_t check_seed = runnable.Call(); + CHECK_EQ(seed, check_seed); + CHECK_EQ(expected, io.output); + } + + { + // buffer mode. + Handle<Code> wrapper = Handle<Code>::null(); + { + // Wrap the above code with a callable function that loads from {input}. 
+ Zone zone; + Graph graph(&zone); + CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig); + RawMachineAssembler raw(isolate, &graph, cdesc); + Node* base = raw.PointerConstant(io.input); + Unique<HeapObject> unique = + Unique<HeapObject>::CreateUninitialized(inner); + Node* target = raw.HeapConstant(unique); + Node** args = zone.NewArray<Node*>(kMaxParamCount); + for (int i = 0; i < num_params; i++) { + args[i] = io.LoadInput(raw, base, i); + } + + Node* call = raw.CallN(desc, target, args); + Node* store = io.StoreOutput(raw, call); + USE(store); + raw.Return(raw.Int32Constant(seed)); + wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export()); + } + + CodeRunner<int32_t> runnable(isolate, wrapper, &csig); + + // Run the code, checking it against the reference. + for (int i = 0; i < 5; i++) { + CType expected = compute(desc, io.input); + int32_t check_seed = runnable.Call(); + CHECK_EQ(seed, check_seed); + CHECK_EQ(expected, io.output); + io.Mutate(); + } + } + } +}; + +} // namespace + + +static void TestInt32Sub(CallDescriptor* desc) { + Isolate* isolate = CcTest::InitIsolateOnce(); + HandleScope scope(isolate); + Zone zone; + GraphAndBuilders inner(&zone); + { + // Build the add function. + GraphAndBuilders& b = inner; + Node* start = b.graph()->NewNode(b.common()->Start(5)); + b.graph()->SetStart(start); + Node* p0 = b.graph()->NewNode(b.common()->Parameter(0), start); + Node* p1 = b.graph()->NewNode(b.common()->Parameter(1), start); + Node* add = b.graph()->NewNode(b.machine()->Int32Sub(), p0, p1); + Node* ret = b.graph()->NewNode(b.common()->Return(), add, start, start); + b.graph()->SetEnd(ret); + } + + Handle<Code> inner_code = CompileGraph("Int32Sub", desc, inner.graph()); + Handle<Code> wrapper = WrapWithCFunction(inner_code, desc); + MachineSignature* msig = + const_cast<MachineSignature*>(desc->GetMachineSignature()); + CodeRunner<int32_t> runnable(isolate, wrapper, + CSignature::FromMachine(&zone, msig)); + + FOR_INT32_INPUTS(i) { + FOR_INT32_INPUTS(j) { + int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) - + static_cast<uint32_t>(*j)); + int32_t result = runnable.Call(*i, *j); + CHECK_EQ(expected, result); + } + } +} + + +static void CopyTwentyInt32(CallDescriptor* desc) { + if (DISABLE_NATIVE_STACK_PARAMS) return; + + const int kNumParams = 20; + int32_t input[kNumParams]; + int32_t output[kNumParams]; + Isolate* isolate = CcTest::InitIsolateOnce(); + HandleScope scope(isolate); + Handle<Code> inner = Handle<Code>::null(); + { + // Writes all parameters into the output buffer. + Zone zone; + Graph graph(&zone); + RawMachineAssembler raw(isolate, &graph, desc); + Node* base = raw.PointerConstant(output); + for (int i = 0; i < kNumParams; i++) { + Node* offset = raw.Int32Constant(i * sizeof(int32_t)); + raw.Store(kMachInt32, base, offset, raw.Parameter(i)); + } + raw.Return(raw.Int32Constant(42)); + inner = CompileGraph("CopyTwentyInt32", desc, &graph, raw.Export()); + } + + CSignature0<int32_t> csig; + Handle<Code> wrapper = Handle<Code>::null(); + { + // Loads parameters from the input buffer and calls the above code. 
+ Zone zone; + Graph graph(&zone); + CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig); + RawMachineAssembler raw(isolate, &graph, cdesc); + Node* base = raw.PointerConstant(input); + Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner); + Node* target = raw.HeapConstant(unique); + Node** args = zone.NewArray<Node*>(kNumParams); + for (int i = 0; i < kNumParams; i++) { + Node* offset = raw.Int32Constant(i * sizeof(int32_t)); + args[i] = raw.Load(kMachInt32, base, offset); + } + + Node* call = raw.CallN(desc, target, args); + raw.Return(call); + wrapper = + CompileGraph("CopyTwentyInt32-wrapper", cdesc, &graph, raw.Export()); + } + + CodeRunner<int32_t> runnable(isolate, wrapper, &csig); + + // Run the code, checking it correctly implements the memcpy. + for (int i = 0; i < 5; i++) { + uint32_t base = 1111111111u * i; + for (int j = 0; j < kNumParams; j++) { + input[j] = static_cast<int32_t>(base + 13 * j); + } + + memset(output, 0, sizeof(output)); + CHECK_EQ(42, runnable.Call()); + + for (int j = 0; j < kNumParams; j++) { + CHECK_EQ(input[j], output[j]); + } + } +} + + +static void Test_RunInt32SubWithRet(int retreg) { + Int32Signature sig(2); + Zone zone; + RegisterPairs pairs; + while (pairs.More()) { + int parray[2]; + int rarray[] = {retreg}; + pairs.Next(&parray[0], &parray[1], false); + Allocator params(parray, 2, nullptr, 0); + Allocator rets(rarray, 1, nullptr, 0); + RegisterConfig config(params, rets); + CallDescriptor* desc = config.Create(&zone, &sig); + TestInt32Sub(desc); + } +} + + +// Separate tests for parallelization. +#define TEST_INT32_SUB_WITH_RET(x) \ + TEST(Run_Int32Sub_all_allocatable_pairs_##x) { \ + if (Register::kMaxNumAllocatableRegisters > x) Test_RunInt32SubWithRet(x); \ + } + + +TEST_INT32_SUB_WITH_RET(0) +TEST_INT32_SUB_WITH_RET(1) +TEST_INT32_SUB_WITH_RET(2) +TEST_INT32_SUB_WITH_RET(3) +TEST_INT32_SUB_WITH_RET(4) +TEST_INT32_SUB_WITH_RET(5) +TEST_INT32_SUB_WITH_RET(6) +TEST_INT32_SUB_WITH_RET(7) +TEST_INT32_SUB_WITH_RET(8) +TEST_INT32_SUB_WITH_RET(9) +TEST_INT32_SUB_WITH_RET(10) +TEST_INT32_SUB_WITH_RET(11) +TEST_INT32_SUB_WITH_RET(12) +TEST_INT32_SUB_WITH_RET(13) +TEST_INT32_SUB_WITH_RET(14) +TEST_INT32_SUB_WITH_RET(15) +TEST_INT32_SUB_WITH_RET(16) +TEST_INT32_SUB_WITH_RET(17) +TEST_INT32_SUB_WITH_RET(18) +TEST_INT32_SUB_WITH_RET(19) + + +TEST(Run_Int32Sub_all_allocatable_single) { + if (DISABLE_NATIVE_STACK_PARAMS) return; + Int32Signature sig(2); + RegisterPairs pairs; + while (pairs.More()) { + Zone zone; + int parray[1]; + int rarray[1]; + pairs.Next(&rarray[0], &parray[0], true); + Allocator params(parray, 1, nullptr, 0); + Allocator rets(rarray, 1, nullptr, 0); + RegisterConfig config(params, rets); + CallDescriptor* desc = config.Create(&zone, &sig); + TestInt32Sub(desc); + } +} + + +TEST(Run_CopyTwentyInt32_all_allocatable_pairs) { + if (DISABLE_NATIVE_STACK_PARAMS) return; + Int32Signature sig(20); + RegisterPairs pairs; + while (pairs.More()) { + Zone zone; + int parray[2]; + int rarray[] = {0}; + pairs.Next(&parray[0], &parray[1], false); + Allocator params(parray, 2, nullptr, 0); + Allocator rets(rarray, 1, nullptr, 0); + RegisterConfig config(params, rets); + CallDescriptor* desc = config.Create(&zone, &sig); + CopyTwentyInt32(desc); + } +} + + +template <typename CType> +static void Run_Computation( + CallDescriptor* desc, void (*build)(CallDescriptor*, RawMachineAssembler&), + CType (*compute)(CallDescriptor*, CType* inputs), int seed = 1) { + Computer<CType>::Run(desc, build, compute, seed); +} + 
+ +static uint32_t coeff[] = {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, + 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, + 79, 83, 89, 97, 101, 103, 107, 109, 113}; + + +static void Build_Int32_WeightedSum(CallDescriptor* desc, + RawMachineAssembler& raw) { + Node* result = raw.Int32Constant(0); + for (int i = 0; i < ParamCount(desc); i++) { + Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i])); + result = raw.Int32Add(result, term); + } + raw.Return(result); +} + + +static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) { + uint32_t result = 0; + for (int i = 0; i < ParamCount(desc); i++) { + result += static_cast<uint32_t>(input[i]) * coeff[i]; + } + return static_cast<int32_t>(result); +} + + +static void Test_Int32_WeightedSum_of_size(int count) { + if (DISABLE_NATIVE_STACK_PARAMS) return; + Int32Signature sig(count); + for (int p0 = 0; p0 < Register::kMaxNumAllocatableRegisters; p0++) { + Zone zone; + + int parray[] = {p0}; + int rarray[] = {0}; + Allocator params(parray, 1, nullptr, 0); + Allocator rets(rarray, 1, nullptr, 0); + RegisterConfig config(params, rets); + CallDescriptor* desc = config.Create(&zone, &sig); + Run_Computation<int32_t>(desc, Build_Int32_WeightedSum, + Compute_Int32_WeightedSum, 257 + count); + } +} + + +// Separate tests for parallelization. +#define TEST_INT32_WEIGHTEDSUM(x) \ + TEST(Run_Int32_WeightedSum_##x) { Test_Int32_WeightedSum_of_size(x); } + + +TEST_INT32_WEIGHTEDSUM(1) +TEST_INT32_WEIGHTEDSUM(2) +TEST_INT32_WEIGHTEDSUM(3) +TEST_INT32_WEIGHTEDSUM(4) +TEST_INT32_WEIGHTEDSUM(5) +TEST_INT32_WEIGHTEDSUM(7) +TEST_INT32_WEIGHTEDSUM(9) +TEST_INT32_WEIGHTEDSUM(11) +TEST_INT32_WEIGHTEDSUM(17) +TEST_INT32_WEIGHTEDSUM(19) + + +template <int which> +static void Build_Select(CallDescriptor* desc, RawMachineAssembler& raw) { + raw.Return(raw.Parameter(which)); +} + + +template <typename CType, int which> +static CType Compute_Select(CallDescriptor* desc, CType* inputs) { + return inputs[which]; +} + + +template <typename CType, int which> +static void RunSelect(CallDescriptor* desc) { + int count = ParamCount(desc); + if (count <= which) return; + Run_Computation<CType>(desc, Build_Select<which>, + Compute_Select<CType, which>, + 1044 + which + 3 * sizeof(CType)); +} + + +template <int which> +void Test_Int32_Select() { + if (DISABLE_NATIVE_STACK_PARAMS) return; + + int parray[] = {0}; + int rarray[] = {0}; + Allocator params(parray, 1, nullptr, 0); + Allocator rets(rarray, 1, nullptr, 0); + RegisterConfig config(params, rets); + + Zone zone; + + for (int i = which + 1; i <= 64; i++) { + Int32Signature sig(i); + CallDescriptor* desc = config.Create(&zone, &sig); + RunSelect<int32_t, which>(desc); + } +} + + +// Separate tests for parallelization. 
+#define TEST_INT32_SELECT(x) \ + TEST(Run_Int32_Select_##x) { Test_Int32_Select<x>(); } + + +TEST_INT32_SELECT(0) +TEST_INT32_SELECT(1) +TEST_INT32_SELECT(2) +TEST_INT32_SELECT(3) +TEST_INT32_SELECT(4) +TEST_INT32_SELECT(5) +TEST_INT32_SELECT(6) +TEST_INT32_SELECT(11) +TEST_INT32_SELECT(15) +TEST_INT32_SELECT(19) +TEST_INT32_SELECT(45) +TEST_INT32_SELECT(62) +TEST_INT32_SELECT(63) + + +TEST(Int64Select_registers) { + if (Register::kMaxNumAllocatableRegisters < 2) return; + if (kPointerSize < 8) return; // TODO(titzer): int64 on 32-bit platforms + + int rarray[] = {0}; + ArgsBuffer<int64_t>::Sig sig(2); + + RegisterPairs pairs; + Zone zone; + while (pairs.More()) { + int parray[2]; + pairs.Next(&parray[0], &parray[1], false); + Allocator params(parray, 2, nullptr, 0); + Allocator rets(rarray, 1, nullptr, 0); + RegisterConfig config(params, rets); + + CallDescriptor* desc = config.Create(&zone, &sig); + RunSelect<int64_t, 0>(desc); + RunSelect<int64_t, 1>(desc); + } +} + + +TEST(Float32Select_registers) { + if (RegisterConfiguration::ArchDefault()->num_double_registers() < 2) return; + + int rarray[] = {0}; + ArgsBuffer<float32>::Sig sig(2); + + Float32RegisterPairs pairs; + Zone zone; + while (pairs.More()) { + int parray[2]; + pairs.Next(&parray[0], &parray[1], false); + Allocator params(nullptr, 0, parray, 2); + Allocator rets(nullptr, 0, rarray, 1); + RegisterConfig config(params, rets); + + CallDescriptor* desc = config.Create(&zone, &sig); + RunSelect<float32, 0>(desc); + RunSelect<float32, 1>(desc); + } +} + + +TEST(Float64Select_registers) { + if (RegisterConfiguration::ArchDefault()->num_double_registers() < 2) return; + + int rarray[] = {0}; + ArgsBuffer<float64>::Sig sig(2); + + Float64RegisterPairs pairs; + Zone zone; + while (pairs.More()) { + int parray[2]; + pairs.Next(&parray[0], &parray[1], false); + Allocator params(nullptr, 0, parray, 2); + Allocator rets(nullptr, 0, rarray, 1); + RegisterConfig config(params, rets); + + CallDescriptor* desc = config.Create(&zone, &sig); + RunSelect<float64, 0>(desc); + RunSelect<float64, 1>(desc); + } +} + + +TEST(Float32Select_stack_params_return_reg) { + if (DISABLE_NATIVE_STACK_PARAMS) return; + int rarray[] = {0}; + Allocator params(nullptr, 0, nullptr, 0); + Allocator rets(nullptr, 0, rarray, 1); + RegisterConfig config(params, rets); + + Zone zone; + for (int count = 1; count < 6; count++) { + ArgsBuffer<float32>::Sig sig(count); + CallDescriptor* desc = config.Create(&zone, &sig); + RunSelect<float32, 0>(desc); + RunSelect<float32, 1>(desc); + RunSelect<float32, 2>(desc); + RunSelect<float32, 3>(desc); + RunSelect<float32, 4>(desc); + RunSelect<float32, 5>(desc); + } +} + + +TEST(Float64Select_stack_params_return_reg) { + if (DISABLE_NATIVE_STACK_PARAMS) return; + int rarray[] = {0}; + Allocator params(nullptr, 0, nullptr, 0); + Allocator rets(nullptr, 0, rarray, 1); + RegisterConfig config(params, rets); + + Zone zone; + for (int count = 1; count < 6; count++) { + ArgsBuffer<float64>::Sig sig(count); + CallDescriptor* desc = config.Create(&zone, &sig); + RunSelect<float64, 0>(desc); + RunSelect<float64, 1>(desc); + RunSelect<float64, 2>(desc); + RunSelect<float64, 3>(desc); + RunSelect<float64, 4>(desc); + RunSelect<float64, 5>(desc); + } +} + + +template <typename CType, int which> +static void Build_Select_With_Call(CallDescriptor* desc, + RawMachineAssembler& raw) { + Handle<Code> inner = Handle<Code>::null(); + int num_params = ParamCount(desc); + CHECK_LE(num_params, kMaxParamCount); + { + Isolate* isolate = 
CcTest::InitIsolateOnce(); + // Build the actual select. + Zone zone; + Graph graph(&zone); + RawMachineAssembler raw(isolate, &graph, desc); + raw.Return(raw.Parameter(which)); + inner = CompileGraph("Select-indirection", desc, &graph, raw.Export()); + CHECK(!inner.is_null()); + CHECK(inner->IsCode()); + } + + { + // Build a call to the function that does the select. + Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner); + Node* target = raw.HeapConstant(unique); + Node** args = raw.zone()->NewArray<Node*>(num_params); + for (int i = 0; i < num_params; i++) { + args[i] = raw.Parameter(i); + } + + Node* call = raw.CallN(desc, target, args); + raw.Return(call); + } +} + + +TEST(Float64StackParamsToStackParams) { + if (DISABLE_NATIVE_STACK_PARAMS) return; + + int rarray[] = {0}; + Allocator params(nullptr, 0, nullptr, 0); + Allocator rets(nullptr, 0, rarray, 1); + + Zone zone; + ArgsBuffer<float64>::Sig sig(2); + RegisterConfig config(params, rets); + CallDescriptor* desc = config.Create(&zone, &sig); + + Run_Computation<float64>(desc, Build_Select_With_Call<float64, 0>, + Compute_Select<float64, 0>, 1098); + + Run_Computation<float64>(desc, Build_Select_With_Call<float64, 1>, + Compute_Select<float64, 1>, 1099); +} diff --git a/deps/v8/test/cctest/compiler/test-run-properties.cc b/deps/v8/test/cctest/compiler/test-run-properties.cc index d4442f7a85..b7677f7fd2 100644 --- a/deps/v8/test/cctest/compiler/test-run-properties.cc +++ b/deps/v8/test/cctest/compiler/test-run-properties.cc @@ -21,16 +21,15 @@ static void TypedArrayLoadHelper(const char* array_type) { values_builder.AddFormatted("a[%d] = 0x%08x;", i, kValues[i]); } - // Note that below source creates two different typed arrays with distinct - // elements kind to get coverage for both access patterns: - // - IsFixedTypedArrayElementsKind(x) - // - IsExternalArrayElementsKind(y) + // Note that below source creates two different typed arrays with the same + // elements kind to get coverage for both (on heap / with external backing + // store) access patterns. const char* source = "(function(a) {" " var x = (a = new %sArray(%d)); %s;" " var y = (a = new %sArray(%d)); %s; %%TypedArrayGetBuffer(y);" " if (!%%HasFixed%sElements(x)) %%AbortJS('x');" - " if (!%%HasExternal%sElements(y)) %%AbortJS('y');" + " if (!%%HasFixed%sElements(y)) %%AbortJS('y');" " function f(a,b) {" " a = a | 0; b = b | 0;" " return x[a] + y[b];" @@ -84,16 +83,15 @@ static void TypedArrayStoreHelper(const char* array_type) { values_builder.AddFormatted("a[%d] = 0x%08x;", i, kValues[i]); } - // Note that below source creates two different typed arrays with distinct - // elements kind to get coverage for both access patterns: - // - IsFixedTypedArrayElementsKind(x) - // - IsExternalArrayElementsKind(y) + // Note that below source creates two different typed arrays with the same + // elements kind to get coverage for both (on heap/with external backing + // store) access patterns. 
const char* source = "(function(a) {" " var x = (a = new %sArray(%d)); %s;" " var y = (a = new %sArray(%d)); %s; %%TypedArrayGetBuffer(y);" " if (!%%HasFixed%sElements(x)) %%AbortJS('x');" - " if (!%%HasExternal%sElements(y)) %%AbortJS('y');" + " if (!%%HasFixed%sElements(y)) %%AbortJS('y');" " function f(a,b) {" " a = a | 0; b = b | 0;" " var t = x[a];" diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc index 9c7998d7af..607efa135b 100644 --- a/deps/v8/test/cctest/compiler/test-run-stubs.cc +++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc @@ -14,23 +14,21 @@ #include "src/parser.h" #include "test/cctest/compiler/function-tester.h" -#if V8_TURBOFAN_TARGET - using namespace v8::internal; using namespace v8::internal::compiler; -TEST(RunMathFloorStub) { +TEST(RunOptimizedMathFloorStub) { HandleAndZoneScope scope; Isolate* isolate = scope.main_isolate(); // Create code and an accompanying descriptor. - MathFloorStub stub(isolate); + MathFloorStub stub(isolate, TurboFanIC::CALL_FROM_OPTIMIZED_CODE); Handle<Code> code = stub.GenerateCode(); Zone* zone = scope.main_zone(); - CompilationInfo info(&stub, isolate, zone); CallDescriptor* descriptor = Linkage::ComputeIncoming(zone, &info); + Handle<FixedArray> tv = isolate->factory()->NewFixedArray(10); // Create a function to call the code using the descriptor. Graph graph(zone); @@ -45,10 +43,13 @@ TEST(RunMathFloorStub) { Node* numberParam = graph.NewNode(common.Parameter(1), start); Unique<HeapObject> u = Unique<HeapObject>::CreateImmovable(code); Node* theCode = graph.NewNode(common.HeapConstant(u)); + Unique<HeapObject> tvu = Unique<HeapObject>::CreateImmovable(tv); + Node* vector = graph.NewNode(common.HeapConstant(tvu)); Node* dummyContext = graph.NewNode(common.NumberConstant(0.0)); - Node* call = graph.NewNode(common.Call(descriptor), theCode, - js.UndefinedConstant(), js.UndefinedConstant(), - numberParam, dummyContext, start, start); + Node* call = + graph.NewNode(common.Call(descriptor), theCode, js.UndefinedConstant(), + js.OneConstant(), vector, js.UndefinedConstant(), + numberParam, dummyContext, start, start); Node* ret = graph.NewNode(common.Return(), call, call, start); Node* end = graph.NewNode(common.End(1), ret); graph.SetStart(start); @@ -143,5 +144,3 @@ TEST(RunStringAddTFStub) { Handle<Object> result = ft.Call(leftArg, rightArg).ToHandleChecked(); CHECK(String::Equals(ft.Val("linksrechts"), Handle<String>::cast(result))); } - -#endif // V8_TURBOFAN_TARGET diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc index 022e01690b..2a642c1589 100644 --- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc +++ b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc @@ -110,14 +110,12 @@ TEST(RunNumberToInt32_float64) { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { FOR_FLOAT64_INPUTS(i) { input = *i; int32_t expected = DoubleToInt32(*i); t.Call(); CHECK_EQ(expected, result); } - } } @@ -139,7 +137,6 @@ TEST(RunNumberToUint32_float64) { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { FOR_FLOAT64_INPUTS(i) { input = *i; uint32_t expected = DoubleToUint32(*i); @@ -147,7 +144,6 @@ TEST(RunNumberToUint32_float64) { CHECK_EQ(static_cast<int32_t>(expected), static_cast<int32_t>(result)); } } -} // Create a simple JSObject with a unique map. 
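(Editorial note, not part of the patch: the two hunks above drop the Pipeline::SupportedTarget() guard around RunNumberToInt32_float64 and RunNumberToUint32_float64. As a reminder of the semantics those tests verify, here is a hypothetical standalone sketch, not V8's DoubleToInt32/DoubleToUint32, of ECMA-262 ToInt32/ToUint32: truncate toward zero, wrap modulo 2^32, and map NaN and infinities to 0.)

  #include <cassert>
  #include <cmath>
  #include <cstdint>

  // Editorial sketch of the conversion the lowered NumberToUint32 must match.
  static uint32_t ToUint32Sketch(double d) {
    if (std::isnan(d) || std::isinf(d)) return 0;
    double t = std::trunc(d);               // round toward zero
    double m = std::fmod(t, 4294967296.0);  // wrap modulo 2^32
    if (m < 0) m += 4294967296.0;
    return static_cast<uint32_t>(m);
  }

  // ToInt32 is the same value reinterpreted as a signed 32-bit integer
  // (two's-complement wrap; well defined for unsigned->signed since C++20).
  static int32_t ToInt32Sketch(double d) {
    return static_cast<int32_t>(ToUint32Sketch(d));
  }

  int main() {
    assert(ToInt32Sketch(-1.5) == -1);            // truncates toward zero
    assert(ToUint32Sketch(-1.0) == 4294967295u);  // wraps into [0, 2^32)
    assert(ToInt32Sketch(4294967303.0) == 7);     // 2^32 + 7
    assert(ToUint32Sketch(std::nan("")) == 0);
    return 0;
  }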
@@ -168,12 +164,10 @@ TEST(RunLoadMap) { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { - Handle<JSObject> src = TestObject(); - Handle<Map> src_map(src->map()); - Object* result = t.Call(*src); // TODO(titzer): raw pointers in call - CHECK_EQ(*src_map, result); - } + Handle<JSObject> src = TestObject(); + Handle<Map> src_map(src->map()); + Object* result = t.Call(*src); // TODO(titzer): raw pointers in call + CHECK_EQ(*src_map, result); } @@ -186,7 +180,6 @@ TEST(RunStoreMap) { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { Handle<JSObject> src = TestObject(); Handle<Map> src_map(src->map()); Handle<JSObject> dst = TestObject(); @@ -194,7 +187,6 @@ TEST(RunStoreMap) { t.Call(*src_map, *dst); // TODO(titzer): raw pointers in call CHECK(*src_map == dst->map()); } -} TEST(RunLoadProperties) { @@ -206,12 +198,10 @@ TEST(RunLoadProperties) { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { Handle<JSObject> src = TestObject(); Handle<FixedArray> src_props(src->properties()); Object* result = t.Call(*src); // TODO(titzer): raw pointers in call CHECK_EQ(*src_props, result); - } } @@ -225,7 +215,6 @@ TEST(RunLoadStoreMap) { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { Handle<JSObject> src = TestObject(); Handle<Map> src_map(src->map()); Handle<JSObject> dst = TestObject(); @@ -234,7 +223,6 @@ TEST(RunLoadStoreMap) { CHECK(result->IsMap()); CHECK_EQ(*src_map, result); CHECK(*src_map == dst->map()); - } } @@ -248,7 +236,6 @@ TEST(RunLoadStoreFixedArrayIndex) { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { Handle<FixedArray> array = t.factory()->NewFixedArray(2); Handle<JSObject> src = TestObject(); Handle<JSObject> dst = TestObject(); @@ -258,7 +245,6 @@ TEST(RunLoadStoreFixedArrayIndex) { CHECK_EQ(*src, result); CHECK_EQ(*src, array->get(0)); CHECK_EQ(*src, array->get(1)); - } } @@ -279,7 +265,6 @@ TEST(RunLoadStoreArrayBuffer) { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer(); Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length); uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store()); @@ -296,7 +281,6 @@ TEST(RunLoadStoreArrayBuffer) { CHECK_EQ(data[i], expected); } } -} TEST(RunLoadFieldFromUntaggedBase) { @@ -312,8 +296,6 @@ TEST(RunLoadFieldFromUntaggedBase) { t.Return(load); t.LowerAllNodes(); - if (!Pipeline::SupportedTarget()) continue; - for (int j = -5; j <= 5; j++) { Smi* expected = Smi::FromInt(j); smis[i] = expected; @@ -337,8 +319,6 @@ TEST(RunStoreFieldToUntaggedBase) { t.Return(p0); t.LowerAllNodes(); - if (!Pipeline::SupportedTarget()) continue; - for (int j = -5; j <= 5; j++) { Smi* expected = Smi::FromInt(j); smis[i] = Smi::FromInt(-100); @@ -365,8 +345,6 @@ TEST(RunLoadElementFromUntaggedBase) { t.Return(load); t.LowerAllNodes(); - if (!Pipeline::SupportedTarget()) continue; - for (int k = -5; k <= 5; k++) { Smi* expected = Smi::FromInt(k); smis[i + j] = expected; @@ -394,8 +372,6 @@ TEST(RunStoreElementFromUntaggedBase) { t.Return(p0); t.LowerAllNodes(); - if (!Pipeline::SupportedTarget()) continue; - for (int k = -5; k <= 5; k++) { Smi* expected = Smi::FromInt(k); smis[i + j] = Smi::FromInt(-100); @@ -462,10 +438,8 @@ class AccessTester : public HandleAndZoneScope { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { Object* result = t.Call(); CHECK_EQ(t.isolate()->heap()->true_value(), result); - } } // 
Create and run code that copies the field in either {untagged_array} @@ -484,10 +458,8 @@ class AccessTester : public HandleAndZoneScope { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { Object* result = t.Call(); CHECK_EQ(t.isolate()->heap()->true_value(), result); - } } // Create and run code that copies the elements from {this} to {that}. @@ -525,10 +497,8 @@ class AccessTester : public HandleAndZoneScope { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { Object* result = t.Call(); CHECK_EQ(t.isolate()->heap()->true_value(), result); - } #endif } @@ -596,13 +566,11 @@ static void RunAccessTest(MachineType rep, E* original_elements, size_t num) { } else { a.RunCopyElement(i, i + 1); // Test element read/write. } - if (Pipeline::SupportedTarget()) { // verify. for (int j = 0; j < num_elements; j++) { E expect = j == (i + 1) ? original_elements[i] : original_elements[j]; CHECK_EQ(expect, a.GetElement(j)); } - } } } } @@ -612,10 +580,8 @@ static void RunAccessTest(MachineType rep, E* original_elements, size_t num) { AccessTester<E> a(tf == 1, rep, original_elements, num); AccessTester<E> b(tt == 1, rep, original_elements, num); a.RunCopyElements(&b); - if (Pipeline::SupportedTarget()) { // verify. for (int i = 0; i < num_elements; i++) { CHECK_EQ(a.GetElement(i), b.GetElement(i)); - } } } } @@ -668,7 +634,7 @@ TEST(RunAccessTests_Smi) { RunAccessTest<Smi*>(kMachAnyTagged, data, arraysize(data)); } -#if V8_TURBOFAN_TARGET + TEST(RunAllocate) { PretenureFlag flag[] = {NOT_TENURED, TENURED}; @@ -684,15 +650,13 @@ TEST(RunAllocate) { t.LowerAllNodes(); t.GenerateCode(); - if (Pipeline::SupportedTarget()) { HeapObject* result = t.CallWithPotentialGC<HeapObject>(); CHECK(t.heap()->new_space()->Contains(result) || flag[i] == TENURED); CHECK(t.heap()->old_space()->Contains(result) || flag[i] == NOT_TENURED); CHECK(result->IsHeapNumber()); - } } } -#endif + // Fills in most of the nodes of the graph in order to make tests shorter. class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders { @@ -1264,7 +1228,6 @@ TEST(LowerReferenceEqual_to_wordeq) { TEST(LowerStringOps_to_call_and_compare) { - if (Pipeline::SupportedTarget()) { // These tests need linkage for the calls. 
TestingGraph t(Type::String(), Type::String()); IrOpcode::Value compare_eq = @@ -1277,7 +1240,6 @@ TEST(LowerStringOps_to_call_and_compare) { t.CheckLoweringBinop(compare_lt, t.simplified()->StringLessThan()); t.CheckLoweringBinop(compare_le, t.simplified()->StringLessThanOrEqual()); } -} void CheckChangeInsertion(IrOpcode::Value expected, MachineType from, @@ -1708,7 +1670,6 @@ TEST(RunNumberDivide_minus_1_TruncatingToInt32) { Node* trunc = t.NumberToInt32(div); t.Return(trunc); - if (Pipeline::SupportedTarget()) { t.LowerAllNodesAndLowerChanges(); t.GenerateCode(); @@ -1716,7 +1677,6 @@ TEST(RunNumberDivide_minus_1_TruncatingToInt32) { int32_t x = 0 - *i; t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i)); } - } } @@ -1747,7 +1707,6 @@ TEST(RunNumberMultiply_TruncatingToInt32) { Node* trunc = t.NumberToInt32(mul); t.Return(trunc); - if (Pipeline::SupportedTarget()) { t.LowerAllNodesAndLowerChanges(); t.GenerateCode(); @@ -1756,7 +1715,6 @@ TEST(RunNumberMultiply_TruncatingToInt32) { t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i)); } } - } } @@ -1771,14 +1729,12 @@ TEST(RunNumberMultiply_TruncatingToUint32) { Node* trunc = t.NumberToUint32(mul); t.Return(trunc); - if (Pipeline::SupportedTarget()) { t.LowerAllNodesAndLowerChanges(); t.GenerateCode(); FOR_UINT32_INPUTS(i) { uint32_t x = DoubleToUint32(static_cast<double>(*i) * k); t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i)); - } } } } @@ -1791,7 +1747,6 @@ TEST(RunNumberDivide_2_TruncatingToUint32) { Node* trunc = t.NumberToUint32(div); t.Return(trunc); - if (Pipeline::SupportedTarget()) { t.LowerAllNodesAndLowerChanges(); t.GenerateCode(); @@ -1799,7 +1754,6 @@ TEST(RunNumberDivide_2_TruncatingToUint32) { uint32_t x = DoubleToUint32(static_cast<double>(*i / 2.0)); t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i)); } - } } @@ -1853,7 +1807,6 @@ TEST(RunNumberDivide_TruncatingToInt32) { Node* trunc = t.NumberToInt32(div); t.Return(trunc); - if (Pipeline::SupportedTarget()) { t.LowerAllNodesAndLowerChanges(); t.GenerateCode(); @@ -1861,7 +1814,6 @@ TEST(RunNumberDivide_TruncatingToInt32) { if (*i == INT_MAX) continue; // exclude max int. int32_t x = DoubleToInt32(static_cast<double>(*i) / k); t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i)); - } } } } @@ -1894,14 +1846,12 @@ TEST(RunNumberDivide_TruncatingToUint32) { Node* trunc = t.NumberToUint32(div); t.Return(trunc); - if (Pipeline::SupportedTarget()) { t.LowerAllNodesAndLowerChanges(); t.GenerateCode(); FOR_UINT32_INPUTS(i) { uint32_t x = *i / k; t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i)); - } } } } @@ -1972,7 +1922,6 @@ TEST(RunNumberModulus_TruncatingToInt32) { Node* trunc = t.NumberToInt32(mod); t.Return(trunc); - if (Pipeline::SupportedTarget()) { t.LowerAllNodesAndLowerChanges(); t.GenerateCode(); @@ -1980,7 +1929,6 @@ TEST(RunNumberModulus_TruncatingToInt32) { if (*i == INT_MAX) continue; // exclude max int. int32_t x = DoubleToInt32(std::fmod(static_cast<double>(*i), k)); t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i)); - } } } } @@ -2014,14 +1962,12 @@ TEST(RunNumberModulus_TruncatingToUint32) { Node* trunc = t.NumberToUint32(mod); t.Return(trunc); - if (Pipeline::SupportedTarget()) { t.LowerAllNodesAndLowerChanges(); t.GenerateCode(); FOR_UINT32_INPUTS(i) { uint32_t x = *i % k; t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i)); - } } } } |
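(Editorial note, not part of the patch: the remaining hunks remove the same Pipeline::SupportedTarget() guard from the truncating multiply/divide/modulus tests. Roughly, those tests assert that NumberDivide or NumberModulus followed by NumberToInt32 behaves like an exact double operation whose result is truncated toward zero, matching the DoubleToInt32(std::fmod(...)) reference values computed in the test bodies. A hypothetical standalone sketch, plain C++ with no V8 internals:)

  #include <cassert>
  #include <cmath>
  #include <cstdint>

  // Editorial sketch: reference results comparable to what CheckNumberCall
  // checks the generated code against, for a constant k and an int32 input x.
  static int32_t TruncatingDivide(int32_t x, int32_t k) {
    double q = static_cast<double>(x) / static_cast<double>(k);
    return static_cast<int32_t>(std::trunc(q));  // quotient stays in int32 range here
  }

  static int32_t TruncatingModulus(int32_t x, int32_t k) {
    double r = std::fmod(static_cast<double>(x), static_cast<double>(k));
    return static_cast<int32_t>(r);              // remainder keeps the dividend's sign
  }

  int main() {
    assert(TruncatingDivide(-7, 2) == -3);   // toward zero, not toward -infinity
    assert(TruncatingDivide(7, -2) == -3);
    assert(TruncatingModulus(-7, 2) == -1);  // sign follows the dividend
    assert(TruncatingModulus(7, -2) == 1);
    return 0;
  }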