Diffstat (limited to 'deps/v8/test/cctest/compiler/test-code-generator.cc')
 deps/v8/test/cctest/compiler/test-code-generator.cc | 590 ++++++++++---------
 1 file changed, 337 insertions(+), 253 deletions(-)
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 26ca83a475..7d86c2e359 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -2,9 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
@@ -34,13 +37,12 @@ namespace {
enum MoveMode { kParallelMoves, kSequentialMoves };
-ParallelMove* CopyMoves(ParallelMove* moves, Zone* zone) {
- ParallelMove* copy = zone->New<ParallelMove>(zone);
- for (auto m : *moves) {
- copy->AddMove(m->source(), m->destination());
- }
- return copy;
-}
+// Whether the layout before and after the moves must be the same.
+enum LayoutMode {
+ kPreserveLayout,
+ kChangeLayout,
+};
+enum OperandLifetime { kInput, kOutput };
int GetSlotSizeInBytes(MachineRepresentation rep) {
switch (rep) {
@@ -60,9 +62,9 @@ int GetSlotSizeInBytes(MachineRepresentation rep) {
}
// Forward declaration.
-Handle<Code> BuildTeardownFunction(Isolate* isolate,
- CallDescriptor* call_descriptor,
- std::vector<AllocatedOperand> parameters);
+Handle<Code> BuildTeardownFunction(
+ Isolate* isolate, CallDescriptor* call_descriptor,
+ const std::vector<AllocatedOperand>& parameters);
// Build the `setup` function. It takes a code object and a FixedArray as
// parameters and calls the former while passing it each element of the array as
@@ -90,23 +92,27 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
// | | | 128-bit vector. |
//
Handle<Code> BuildSetupFunction(Isolate* isolate,
- CallDescriptor* call_descriptor,
- std::vector<AllocatedOperand> parameters) {
- CodeAssemblerTester tester(isolate, 3, CodeKind::BUILTIN,
- "setup"); // Include receiver.
+ CallDescriptor* test_call_descriptor,
+ CallDescriptor* teardown_call_descriptor,
+ std::vector<AllocatedOperand> parameters,
+ const std::vector<AllocatedOperand>& results) {
+ CodeAssemblerTester tester(isolate, JSParameterCount(2), CodeKind::BUILTIN,
+ "setup");
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
params.push_back(__ Parameter<Object>(1));
- params.push_back(__ HeapConstant(ToCodeT(
- BuildTeardownFunction(isolate, call_descriptor, parameters), isolate)));
+ // The parameters of the teardown function are the results of the test
+ // function.
+ params.push_back(__ HeapConstant(
+ BuildTeardownFunction(isolate, teardown_call_descriptor, results)));
// First allocate the FixedArray which will hold the final results. Here we
// should take care of all allocations, meaning we allocate HeapNumbers and
// FixedArrays representing Simd128 values.
TNode<FixedArray> state_out =
- __ AllocateZeroedFixedArray(__ IntPtrConstant(parameters.size()));
- for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
- switch (parameters[i].representation()) {
+ __ AllocateZeroedFixedArray(__ IntPtrConstant(results.size()));
+ for (int i = 0; i < static_cast<int>(results.size()); i++) {
+ switch (results[i].representation()) {
case MachineRepresentation::kTagged:
break;
case MachineRepresentation::kFloat32:
@@ -165,7 +171,8 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
}
__ Return(
__ UncheckedCast<Object>(tester.raw_assembler_for_testing()->AddNode(
- tester.raw_assembler_for_testing()->common()->Call(call_descriptor),
+ tester.raw_assembler_for_testing()->common()->Call(
+ test_call_descriptor),
static_cast<int>(params.size()), params.data())));
return tester.GenerateCodeCloseAndEscape();
}
@@ -212,9 +219,9 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
// UNSAFE_SKIP_WRITE_BARRIER. The reason for this is that `RecordWrite` may
// clobber the top 64 bits of Simd128 registers. This is the case on x64, ia32
// and Arm64 for example.
-Handle<Code> BuildTeardownFunction(Isolate* isolate,
- CallDescriptor* call_descriptor,
- std::vector<AllocatedOperand> parameters) {
+Handle<Code> BuildTeardownFunction(
+ Isolate* isolate, CallDescriptor* call_descriptor,
+ const std::vector<AllocatedOperand>& parameters) {
CodeAssemblerTester tester(isolate, call_descriptor, "teardown");
CodeStubAssembler assembler(tester.state());
auto result_array = __ Parameter<FixedArray>(1);
@@ -397,10 +404,11 @@ class TestEnvironment : public HandleAndZoneScope {
static constexpr int kFloatConstantCount = 4;
static constexpr int kDoubleConstantCount = 4;
- TestEnvironment()
+ explicit TestEnvironment(LayoutMode layout_mode = kPreserveLayout)
: blocks_(1, NewBlock(main_zone(), RpoNumber::FromInt(0)), main_zone()),
instructions_(main_isolate(), main_zone(), &blocks_),
rng_(CcTest::random_number_generator()),
+ layout_mode_(layout_mode),
supported_reps_({MachineRepresentation::kTagged,
MachineRepresentation::kFloat32,
MachineRepresentation::kFloat64}) {
@@ -424,20 +432,23 @@ class TestEnvironment : public HandleAndZoneScope {
LocationSignature::Builder test_signature(
main_zone(), 1,
2 + kGeneralRegisterCount + kDoubleRegisterCount + stack_slot_count_);
+ LocationSignature::Builder teardown_signature(
+ main_zone(), 1,
+ 2 + kGeneralRegisterCount + kDoubleRegisterCount + stack_slot_count_);
- // The first parameter will be the code object of the "teardown"
- // function. This way, the "test" function can tail-call to it.
- test_signature.AddParam(LinkageLocation::ForRegister(
- kReturnRegister0.code(), MachineType::AnyTagged()));
-
- // The second parameter will be a pre-allocated FixedArray that the
- // "teardown" function will fill with result and then return. We place this
- // parameter on the first stack argument slot which is always -1. And
- // therefore slots to perform moves on start at -2.
- test_signature.AddParam(
- LinkageLocation::ForCallerFrameSlot(-1, MachineType::AnyTagged()));
- int slot_parameter_n = -2;
- const int kTotalStackParameterCount = stack_slot_count_ + 1;
+ for (auto* sig : {&test_signature, &teardown_signature}) {
+ // The first parameter will be the code object of the "teardown"
+ // function. This way, the "test" function can tail-call to it.
+ sig->AddParam(LinkageLocation::ForRegister(kReturnRegister0.code(),
+ MachineType::AnyTagged()));
+
+ // The second parameter will be a pre-allocated FixedArray that the
+ // "teardown" function will fill with result and then return. We place
+ // this parameter on the first stack argument slot which is always -1. And
+ // therefore slots to perform moves on start at -2.
+ sig->AddParam(
+ LinkageLocation::ForCallerFrameSlot(-1, MachineType::AnyTagged()));
+ }
// Initialise registers.
@@ -449,6 +460,60 @@ class TestEnvironment : public HandleAndZoneScope {
DCHECK_LE(kGeneralRegisterCount,
GetRegConfig()->num_allocatable_general_registers() - 2);
+ GenerateLayout(setup_layout_, allocated_slots_in_, &test_signature);
+ test_descriptor_ = MakeCallDescriptor(test_signature.Build());
+
+ if (layout_mode_ == kChangeLayout) {
+ GenerateLayout(teardown_layout_, allocated_slots_out_,
+ &teardown_signature);
+ teardown_descriptor_ = MakeCallDescriptor(teardown_signature.Build());
+ }
+ // Else, we just reuse the layout and signature of the setup function for
+ // the teardown function since they are the same.
+ }
+
+ void AddStackSlots(
+ std::vector<AllocatedOperand>& layout,
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>& slots,
+ LocationSignature::Builder* sig) {
+ // The first stack slot is the FixedArray, start at -2.
+ int slot_parameter_n = -2;
+ std::map<MachineRepresentation, int> slot_count = {
+ {MachineRepresentation::kTagged, kTaggedSlotCount},
+ {MachineRepresentation::kFloat32, kFloat32SlotCount},
+ {MachineRepresentation::kFloat64, kFloat64SlotCount}};
+ if (TestSimd128Moves()) {
+ slot_count.emplace(MachineRepresentation::kSimd128, kSimd128SlotCount);
+ }
+
+ // Allocate new slots until we run out of them.
+ while (std::any_of(slot_count.cbegin(), slot_count.cend(),
+ [](const std::pair<MachineRepresentation, int>& entry) {
+ // True if there are slots left to allocate for this
+ // representation.
+ return entry.second > 0;
+ })) {
+ // Pick a random MachineRepresentation from supported_reps_.
+ MachineRepresentation rep = CreateRandomMachineRepresentation();
+ auto entry = slot_count.find(rep);
+ DCHECK_NE(entry, slot_count.end());
+ // We may have picked a representation for which all slots have already
+ // been allocated.
+ if (entry->second > 0) {
+ // Keep a map of (MachineRepresentation . std::vector<int>) with
+ // allocated slots to pick from for each representation.
+ int slot = slot_parameter_n;
+ slot_parameter_n -= (GetSlotSizeInBytes(rep) / kSystemPointerSize);
+ AddStackSlot(layout, slots, sig, rep, slot);
+ entry->second--;
+ }
+ }
+ }
+
+ void GenerateLayout(
+ std::vector<AllocatedOperand>& layout,
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>& slots,
+ LocationSignature::Builder* sig) {
RegList general_mask =
RegList::FromBits(GetRegConfig()->allocatable_general_codes_mask());
// kReturnRegister0 is used to hold the "teardown" code object, do not
@@ -459,7 +524,7 @@ class TestEnvironment : public HandleAndZoneScope {
for (int i = 0; i < kGeneralRegisterCount; i++) {
int code = registers->GetAllocatableGeneralCode(i);
- AddRegister(&test_signature, MachineRepresentation::kTagged, code);
+ AddRegister(layout, sig, MachineRepresentation::kTagged, code);
}
// We assume that Double, Float and Simd128 registers alias, depending on
// kSimpleFPAliasing. For this reason, we allocate a Float, Double and
@@ -472,12 +537,12 @@ class TestEnvironment : public HandleAndZoneScope {
if (kFPAliasing != AliasingKind::kCombine) {
// Allocate three registers at once if kSimd128 is supported, else
// allocate in pairs.
- AddRegister(&test_signature, MachineRepresentation::kFloat32,
+ AddRegister(layout, sig, MachineRepresentation::kFloat32,
registers->GetAllocatableFloatCode(i));
- AddRegister(&test_signature, MachineRepresentation::kFloat64,
+ AddRegister(layout, sig, MachineRepresentation::kFloat64,
registers->GetAllocatableDoubleCode(i + 1));
if (TestSimd128Moves()) {
- AddRegister(&test_signature, MachineRepresentation::kSimd128,
+ AddRegister(layout, sig, MachineRepresentation::kSimd128,
registers->GetAllocatableSimd128Code(i + 2));
i++;
}
@@ -499,52 +564,19 @@ class TestEnvironment : public HandleAndZoneScope {
"Arm has a q8 and a d16 register but no overlapping s32 register.");
int first_simd128 = registers->GetAllocatableSimd128Code(i);
int second_simd128 = registers->GetAllocatableSimd128Code(i + 1);
- AddRegister(&test_signature, MachineRepresentation::kFloat32,
+ AddRegister(layout, sig, MachineRepresentation::kFloat32,
first_simd128 * 4);
- AddRegister(&test_signature, MachineRepresentation::kFloat64,
+ AddRegister(layout, sig, MachineRepresentation::kFloat64,
second_simd128 * 2);
if (TestSimd128Moves()) {
int third_simd128 = registers->GetAllocatableSimd128Code(i + 2);
- AddRegister(&test_signature, MachineRepresentation::kSimd128,
+ AddRegister(layout, sig, MachineRepresentation::kSimd128,
third_simd128);
i++;
}
}
}
- // Initialise stack slots.
-
- std::map<MachineRepresentation, int> slots = {
- {MachineRepresentation::kTagged, kTaggedSlotCount},
- {MachineRepresentation::kFloat32, kFloat32SlotCount},
- {MachineRepresentation::kFloat64, kFloat64SlotCount}};
- if (TestSimd128Moves()) {
- slots.emplace(MachineRepresentation::kSimd128, kSimd128SlotCount);
- }
-
- // Allocate new slots until we run out of them.
- while (std::any_of(slots.cbegin(), slots.cend(),
- [](const std::pair<MachineRepresentation, int>& entry) {
- // True if there are slots left to allocate for this
- // representation.
- return entry.second > 0;
- })) {
- // Pick a random MachineRepresentation from supported_reps_.
- MachineRepresentation rep = CreateRandomMachineRepresentation();
- auto entry = slots.find(rep);
- DCHECK(entry != slots.end());
- // We may have picked a representation for which all slots have already
- // been allocated.
- if (entry->second > 0) {
- // Keep a map of (MachineRepresentation . std::vector<int>) with
- // allocated slots to pick from for each representation.
- int slot = slot_parameter_n;
- slot_parameter_n -= (GetSlotSizeInBytes(rep) / kSystemPointerSize);
- AddStackSlot(&test_signature, rep, slot);
- entry->second--;
- }
- }
-
// Initialise random constants.
// While constants do not know about Smis, we need to be able to
@@ -574,15 +606,19 @@ class TestEnvironment : public HandleAndZoneScope {
}
// The "teardown" function returns a FixedArray with the resulting state.
- test_signature.AddReturn(LinkageLocation::ForRegister(
- kReturnRegister0.code(), MachineType::AnyTagged()));
+ sig->AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
+ MachineType::AnyTagged()));
+ AddStackSlots(layout, slots, sig);
+ }
- test_descriptor_ = main_zone()->New<CallDescriptor>(
+ CallDescriptor* MakeCallDescriptor(LocationSignature* sig) {
+ const int kTotalStackParameterCount = stack_slot_count_ + 1;
+ return main_zone()->New<CallDescriptor>(
CallDescriptor::kCallCodeObject, // kind
MachineType::AnyTagged(), // target MachineType
LinkageLocation::ForAnyRegister(
MachineType::AnyTagged()), // target location
- test_signature.Build(), // location_sig
+ sig, // location_sig
kTotalStackParameterCount, // stack_parameter_count
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
@@ -613,10 +649,11 @@ class TestEnvironment : public HandleAndZoneScope {
// from on `teardown`. Therefore they are part of the environment's layout,
// and are parameters of the `test` function.
- void AddRegister(LocationSignature::Builder* test_signature,
+ void AddRegister(std::vector<AllocatedOperand>& layout,
+ LocationSignature::Builder* test_signature,
MachineRepresentation rep, int code) {
AllocatedOperand operand(AllocatedOperand::REGISTER, rep, code);
- layout_.push_back(operand);
+ layout.push_back(operand);
test_signature->AddParam(LinkageLocation::ForRegister(
code, MachineType::TypeForRepresentation(rep)));
auto entry = allocated_registers_.find(rep);
@@ -627,15 +664,17 @@ class TestEnvironment : public HandleAndZoneScope {
}
}
- void AddStackSlot(LocationSignature::Builder* test_signature,
- MachineRepresentation rep, int slot) {
+ void AddStackSlot(
+ std::vector<AllocatedOperand>& layout,
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>& slots,
+ LocationSignature::Builder* sig, MachineRepresentation rep, int slot) {
AllocatedOperand operand(AllocatedOperand::STACK_SLOT, rep, slot);
- layout_.push_back(operand);
- test_signature->AddParam(LinkageLocation::ForCallerFrameSlot(
+ layout.push_back(operand);
+ sig->AddParam(LinkageLocation::ForCallerFrameSlot(
slot, MachineType::TypeForRepresentation(rep)));
- auto entry = allocated_slots_.find(rep);
- if (entry == allocated_slots_.end()) {
- allocated_slots_.emplace(rep, std::vector<AllocatedOperand>{operand});
+ auto entry = slots.find(rep);
+ if (entry == slots.end()) {
+ slots.emplace(rep, std::vector<AllocatedOperand>{operand});
} else {
entry->second.push_back(operand);
}
@@ -646,9 +685,9 @@ class TestEnvironment : public HandleAndZoneScope {
// environment.
Handle<FixedArray> GenerateInitialState() {
Handle<FixedArray> state = main_isolate()->factory()->NewFixedArray(
- static_cast<int>(layout_.size()));
+ static_cast<int>(setup_layout_.size()));
for (int i = 0; i < state->length(); i++) {
- switch (layout_[i].representation()) {
+ switch (setup_layout_[i].representation()) {
case MachineRepresentation::kTagged:
state->set(i, Smi::FromInt(rng_->NextInt(Smi::kMaxValue)));
break;
@@ -687,7 +726,7 @@ class TestEnvironment : public HandleAndZoneScope {
// return a new resulting state.
Handle<FixedArray> Run(Handle<Code> test, Handle<FixedArray> state_in) {
Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
- static_cast<int>(layout_.size()));
+ static_cast<int>(TeardownLayout().size()));
{
#ifdef ENABLE_SLOW_DCHECKS
// The "setup" and "teardown" functions are relatively big, and with
@@ -696,8 +735,9 @@ class TestEnvironment : public HandleAndZoneScope {
bool old_enable_slow_asserts = v8_flags.enable_slow_asserts;
v8_flags.enable_slow_asserts = false;
#endif
- Handle<Code> setup =
- BuildSetupFunction(main_isolate(), test_descriptor_, layout_);
+ Handle<Code> setup = BuildSetupFunction(main_isolate(), test_descriptor_,
+ TeardownCallDescriptor(),
+ setup_layout_, TeardownLayout());
#ifdef ENABLE_SLOW_DCHECKS
v8_flags.enable_slow_asserts = old_enable_slow_asserts;
#endif
@@ -705,72 +745,109 @@ class TestEnvironment : public HandleAndZoneScope {
// return value will be freed along with it. Copy the result into
// state_out.
FunctionTester ft(setup, 2);
- Handle<FixedArray> result =
- ft.CallChecked<FixedArray>(ToCodeT(test, main_isolate()), state_in);
+ Handle<FixedArray> result = ft.CallChecked<FixedArray>(test, state_in);
CHECK_EQ(result->length(), state_in->length());
result->CopyTo(0, *state_out, 0, result->length());
}
return state_out;
}
+ std::vector<AllocatedOperand>& TeardownLayout() {
+ return layout_mode_ == kPreserveLayout ? setup_layout_ : teardown_layout_;
+ }
+
+ CallDescriptor* TeardownCallDescriptor() {
+ return layout_mode_ == kPreserveLayout ? test_descriptor_
+ : teardown_descriptor_;
+ }
+
// For a given operand representing either a register or a stack slot, return
// what position it should live in inside a FixedArray state.
- int OperandToStatePosition(const AllocatedOperand& operand) const {
+ int OperandToStatePosition(std::vector<AllocatedOperand>& layout,
+ const AllocatedOperand& operand) const {
// Search `layout_` for `operand`.
- auto it = std::find_if(layout_.cbegin(), layout_.cend(),
+ auto it = std::find_if(layout.cbegin(), layout.cend(),
[operand](const AllocatedOperand& this_operand) {
return this_operand.Equals(operand);
});
- DCHECK_NE(it, layout_.cend());
- return static_cast<int>(std::distance(layout_.cbegin(), it));
+ DCHECK_NE(it, layout.cend());
+ return static_cast<int>(std::distance(layout.cbegin(), it));
}
- // Perform the given list of moves on `state_in` and return a newly allocated
- // state with the results.
- Handle<FixedArray> SimulateMoves(ParallelMove* moves,
- Handle<FixedArray> state_in,
- MoveMode move_mode) {
+ Object GetMoveSource(Handle<FixedArray> state, MoveOperands* move) {
+ InstructionOperand from = move->source();
+ if (from.IsConstant()) {
+ Constant constant = instructions_.GetConstant(
+ ConstantOperand::cast(from).virtual_register());
+ Handle<Object> constant_value;
+ switch (constant.type()) {
+ case Constant::kInt32:
+ constant_value =
+ Handle<Smi>(Smi(static_cast<Address>(
+ static_cast<intptr_t>(constant.ToInt32()))),
+ main_isolate());
+ break;
+ case Constant::kInt64:
+ constant_value = Handle<Smi>(
+ Smi(static_cast<Address>(constant.ToInt64())), main_isolate());
+ break;
+ case Constant::kFloat32:
+ constant_value = main_isolate()->factory()->NewHeapNumber(
+ static_cast<double>(constant.ToFloat32()));
+ break;
+ case Constant::kFloat64:
+ constant_value = main_isolate()->factory()->NewHeapNumber(
+ constant.ToFloat64().value());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return *constant_value;
+ } else {
+ int from_index =
+ OperandToStatePosition(setup_layout_, AllocatedOperand::cast(from));
+ return state->get(from_index);
+ }
+ }
+
+ // Perform the given list of sequential moves on `state_in` and return a newly
+ // allocated state with the results.
+ Handle<FixedArray> SimulateSequentialMoves(ParallelMove* moves,
+ Handle<FixedArray> state_in) {
Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
- static_cast<int>(layout_.size()));
+ static_cast<int>(setup_layout_.size()));
// We do not want to modify `state_in` in place so perform the moves on a
// copy.
state_in->CopyTo(0, *state_out, 0, state_in->length());
+ DCHECK_EQ(kPreserveLayout, layout_mode_);
for (auto move : *moves) {
- int to_index =
- OperandToStatePosition(AllocatedOperand::cast(move->destination()));
- InstructionOperand from = move->source();
- if (from.IsConstant()) {
- Constant constant = instructions_.GetConstant(
- ConstantOperand::cast(from).virtual_register());
- Handle<Object> constant_value;
- switch (constant.type()) {
- case Constant::kInt32:
- constant_value =
- Handle<Smi>(Smi(static_cast<Address>(
- static_cast<intptr_t>(constant.ToInt32()))),
- main_isolate());
- break;
- case Constant::kInt64:
- constant_value = Handle<Smi>(
- Smi(static_cast<Address>(constant.ToInt64())), main_isolate());
- break;
- case Constant::kFloat32:
- constant_value = main_isolate()->factory()->NewHeapNumber(
- static_cast<double>(constant.ToFloat32()));
- break;
- case Constant::kFloat64:
- constant_value = main_isolate()->factory()->NewHeapNumber(
- constant.ToFloat64().value());
- break;
- default:
- UNREACHABLE();
- }
- state_out->set(to_index, *constant_value);
- } else {
- int from_index = OperandToStatePosition(AllocatedOperand::cast(from));
- state_out->set(to_index, move_mode == kParallelMoves
- ? state_in->get(from_index)
- : state_out->get(from_index));
+ int to_index = OperandToStatePosition(
+ TeardownLayout(), AllocatedOperand::cast(move->destination()));
+ Object source = GetMoveSource(state_out, move);
+ state_out->set(to_index, source);
+ }
+ return state_out;
+ }
+
+ // Perform the given list of parallel moves on `state_in` and return a newly
+ // allocated state with the results.
+ Handle<FixedArray> SimulateParallelMoves(ParallelMove* moves,
+ Handle<FixedArray> state_in) {
+ Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
+ static_cast<int>(teardown_layout_.size()));
+ for (auto move : *moves) {
+ int to_index = OperandToStatePosition(
+ TeardownLayout(), AllocatedOperand::cast(move->destination()));
+ Object source = GetMoveSource(state_in, move);
+ state_out->set(to_index, source);
+ }
+ // If we generated redundant moves, they were eliminated automatically and
+ // don't appear in the parallel move. Simulate them now.
+ for (auto& operand : teardown_layout_) {
+ int to_index = OperandToStatePosition(TeardownLayout(), operand);
+ if (state_out->get(to_index).IsUndefined()) {
+ int from_index = OperandToStatePosition(setup_layout_, operand);
+ state_out->set(to_index, state_in->get(from_index));
}
}
return state_out;
@@ -781,15 +858,15 @@ class TestEnvironment : public HandleAndZoneScope {
Handle<FixedArray> SimulateSwaps(ParallelMove* swaps,
Handle<FixedArray> state_in) {
Handle<FixedArray> state_out = main_isolate()->factory()->NewFixedArray(
- static_cast<int>(layout_.size()));
+ static_cast<int>(setup_layout_.size()));
// We do not want to modify `state_in` in place so perform the swaps on a
// copy.
state_in->CopyTo(0, *state_out, 0, state_in->length());
for (auto swap : *swaps) {
- int lhs_index =
- OperandToStatePosition(AllocatedOperand::cast(swap->destination()));
- int rhs_index =
- OperandToStatePosition(AllocatedOperand::cast(swap->source()));
+ int lhs_index = OperandToStatePosition(
+ setup_layout_, AllocatedOperand::cast(swap->destination()));
+ int rhs_index = OperandToStatePosition(
+ setup_layout_, AllocatedOperand::cast(swap->source()));
Handle<Object> lhs{state_out->get(lhs_index), main_isolate()};
Handle<Object> rhs{state_out->get(rhs_index), main_isolate()};
state_out->set(lhs_index, *rhs);
@@ -800,16 +877,17 @@ class TestEnvironment : public HandleAndZoneScope {
// Compare the given state with a reference.
void CheckState(Handle<FixedArray> actual, Handle<FixedArray> expected) {
- for (int i = 0; i < static_cast<int>(layout_.size()); i++) {
+ for (int i = 0; i < static_cast<int>(TeardownLayout().size()); i++) {
Handle<Object> actual_value{actual->get(i), main_isolate()};
Handle<Object> expected_value{expected->get(i), main_isolate()};
if (!CompareValues(actual_value, expected_value,
- layout_[i].representation())) {
+ TeardownLayout()[i].representation())) {
std::ostringstream expected_str;
PrintStateValue(expected_str, main_isolate(), expected_value,
- layout_[i]);
+ TeardownLayout()[i]);
std::ostringstream actual_str;
- PrintStateValue(actual_str, main_isolate(), actual_value, layout_[i]);
+ PrintStateValue(actual_str, main_isolate(), actual_value,
+ TeardownLayout()[i]);
FATAL("Expected: '%s' but got '%s'", expected_str.str().c_str(),
actual_str.str().c_str());
}
@@ -846,48 +924,40 @@ class TestEnvironment : public HandleAndZoneScope {
kCannotBeConstant
};
- // Generate parallel moves at random.
- // In sequential mode, they can be incompatible between each other as this
- // doesn't matter to the code generator.
- // In parallel mode, ensure that two destinations can't conflict with each
- // other, and pick sources among the compatible destinations if any, to
- // increase the number of dependencies and stress the gap resolver.
+ // Generate sequential moves at random. Note that they may not be compatible
+ // between each other as this doesn't matter to the code generator.
ParallelMove* GenerateRandomMoves(int size, MoveMode move_mode) {
ParallelMove* parallel_move = main_zone()->New<ParallelMove>(main_zone());
- std::map<MachineRepresentation, std::vector<InstructionOperand*>>
- destinations;
for (int i = 0; i < size;) {
MachineRepresentation rep = CreateRandomMachineRepresentation();
- InstructionOperand source;
- if (move_mode == kParallelMoves && !destinations[rep].empty()) {
- // Try reusing a destination.
- source = *destinations[rep][rng_->NextInt(
- static_cast<int>(destinations[rep].size()))];
- } else {
- source = CreateRandomOperand(kNone, rep);
- }
- MoveOperands mo(source, CreateRandomOperand(kCannotBeConstant, rep));
+ InstructionOperand source = CreateRandomOperand(kNone, rep, kInput);
+ MoveOperands mo(source,
+ CreateRandomOperand(kCannotBeConstant, rep, kOutput));
// It isn't valid to call `AssembleMove` and `AssembleSwap` with redundant
// moves.
if (mo.IsRedundant()) continue;
- // Do not generate parallel moves with conflicting destinations.
- if (move_mode == kParallelMoves) {
- bool conflict = std::any_of(
- destinations.begin(), destinations.end(), [&mo](auto& p) {
- return std::any_of(
- p.second.begin(), p.second.end(), [&mo](auto& dest) {
- return dest->InterferesWith(mo.destination());
- });
- });
-
- if (conflict) continue;
- }
- MoveOperands* operands =
- parallel_move->AddMove(mo.source(), mo.destination());
- // Iterate only when a move was created.
+ parallel_move->AddMove(mo.source(), mo.destination());
i++;
- destinations[rep].push_back(&operands->destination());
+ }
+
+ return parallel_move;
+ }
+
+ // Generate parallel moves at random. Generate exactly one move for each
+ // available destination operand. Since the output layout is different from
+ // the input layout, this ensures that each destination operand is initialized
+ // with one of the values in the input fixed array.
+ ParallelMove* GenerateRandomParallelMoves() {
+ ParallelMove* parallel_move = main_zone()->New<ParallelMove>(main_zone());
+ std::vector<AllocatedOperand> destinations = teardown_layout_;
+ std::shuffle(destinations.begin(), destinations.end(), *rng_);
+
+ for (size_t i = 0; i < destinations.size(); ++i) {
+ MachineRepresentation rep = destinations[i].representation();
+ InstructionOperand source = CreateRandomOperand(kNone, rep, kInput);
+ MoveOperands mo(source, destinations[i]);
+ parallel_move->AddMove(mo.source(), mo.destination());
}
return parallel_move;
@@ -898,8 +968,10 @@ class TestEnvironment : public HandleAndZoneScope {
for (int i = 0; i < size;) {
MachineRepresentation rep = CreateRandomMachineRepresentation();
- InstructionOperand lhs = CreateRandomOperand(kCannotBeConstant, rep);
- InstructionOperand rhs = CreateRandomOperand(kCannotBeConstant, rep);
+ InstructionOperand lhs =
+ CreateRandomOperand(kCannotBeConstant, rep, kOutput);
+ InstructionOperand rhs =
+ CreateRandomOperand(kCannotBeConstant, rep, kInput);
MoveOperands mo(lhs, rhs);
// It isn't valid to call `AssembleMove` and `AssembleSwap` with redundant
// moves.
@@ -923,7 +995,8 @@ class TestEnvironment : public HandleAndZoneScope {
}
InstructionOperand CreateRandomOperand(OperandConstraint constraint,
- MachineRepresentation rep) {
+ MachineRepresentation rep,
+ OperandLifetime operand_lifetime) {
// Only generate a Constant if the operand is a source and we have a
// constant with a compatible representation in stock.
bool generate_constant =
@@ -931,7 +1004,7 @@ class TestEnvironment : public HandleAndZoneScope {
(allocated_constants_.find(rep) != allocated_constants_.end());
switch (rng_->NextInt(generate_constant ? 3 : 2)) {
case 0:
- return CreateRandomStackSlotOperand(rep);
+ return CreateRandomStackSlotOperand(rep, operand_lifetime);
case 1:
return CreateRandomRegisterOperand(rep);
case 2:
@@ -946,9 +1019,29 @@ class TestEnvironment : public HandleAndZoneScope {
return allocated_registers_[rep][index];
}
- AllocatedOperand CreateRandomStackSlotOperand(MachineRepresentation rep) {
- int index = rng_->NextInt(static_cast<int>(allocated_slots_[rep].size()));
- return allocated_slots_[rep][index];
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>&
+ AllocatedSlotsIn() {
+ return allocated_slots_in_;
+ }
+
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>&
+ AllocatedSlotsOut() {
+ return layout_mode_ == kPreserveLayout ? allocated_slots_in_
+ : allocated_slots_out_;
+ }
+
+ AllocatedOperand CreateRandomStackSlotOperand(
+ MachineRepresentation rep,
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>& slots) {
+ int index = rng_->NextInt(static_cast<int>(AllocatedSlotsIn()[rep].size()));
+ return slots[rep][index];
+ }
+
+ AllocatedOperand CreateRandomStackSlotOperand(
+ MachineRepresentation rep, OperandLifetime operand_lifetime) {
+ return CreateRandomStackSlotOperand(rep, operand_lifetime == kInput
+ ? AllocatedSlotsIn()
+ : AllocatedSlotsOut());
}
ConstantOperand CreateRandomConstant(MachineRepresentation rep) {
@@ -967,14 +1060,18 @@ class TestEnvironment : public HandleAndZoneScope {
InstructionSequence* instructions() { return &instructions_; }
CallDescriptor* test_descriptor() { return test_descriptor_; }
int stack_slot_count() const { return stack_slot_count_; }
+ LayoutMode layout_mode() const { return layout_mode_; }
private:
ZoneVector<InstructionBlock*> blocks_;
InstructionSequence instructions_;
v8::base::RandomNumberGenerator* rng_;
// The layout describes the type of each element in the environment, in order.
- std::vector<AllocatedOperand> layout_;
+ const LayoutMode layout_mode_;
+ std::vector<AllocatedOperand> setup_layout_;
+ std::vector<AllocatedOperand> teardown_layout_;
CallDescriptor* test_descriptor_;
+ CallDescriptor* teardown_descriptor_;
// Allocated constants, registers and stack slots that we can generate moves
// with. Each per compatible representation.
std::vector<MachineRepresentation> supported_reps_;
@@ -983,7 +1080,9 @@ class TestEnvironment : public HandleAndZoneScope {
std::map<MachineRepresentation, std::vector<AllocatedOperand>>
allocated_registers_;
std::map<MachineRepresentation, std::vector<AllocatedOperand>>
- allocated_slots_;
+ allocated_slots_in_;
+ std::map<MachineRepresentation, std::vector<AllocatedOperand>>
+ allocated_slots_out_;
int stack_slot_count_;
};
@@ -998,8 +1097,8 @@ constexpr int TestEnvironment::kSmiConstantCount;
constexpr int TestEnvironment::kFloatConstantCount;
constexpr int TestEnvironment::kDoubleConstantCount;
-// Wrapper around the CodeGenerator. Code generated by this can only be called
-// using the given `TestEnvironment`.
+// Wrapper around the CodeGenerator. Code generated by this can
+// only be called using the given `TestEnvironment`.
class CodeGeneratorTester {
public:
explicit CodeGeneratorTester(TestEnvironment* environment,
@@ -1016,24 +1115,26 @@ class CodeGeneratorTester {
// CheckAssembleSwap, we'll transparently make use of local spill slots
// instead of stack parameters for those that were picked. This allows us to
// test negative, positive, far and near ranges.
- for (int i = 0; i < (environment->stack_slot_count() / 2);) {
- MachineRepresentation rep =
- environment->CreateRandomMachineRepresentation();
- LocationOperand old_slot =
- LocationOperand::cast(environment->CreateRandomStackSlotOperand(rep));
- // Do not pick the same slot twice.
- if (GetSpillSlot(&old_slot) != spill_slots_.end()) {
- continue;
- }
- LocationOperand new_slot =
- AllocatedOperand(LocationOperand::STACK_SLOT, rep,
- frame_.AllocateSpillSlot(GetSlotSizeInBytes(rep)));
- // Artificially create space on the stack by allocating a new slot.
- if (extra_stack_space > 0) {
- frame_.AllocateSpillSlot(extra_stack_space);
+ if (environment->layout_mode() == kPreserveLayout) {
+ for (int i = 0; i < (environment->stack_slot_count() / 2);) {
+ MachineRepresentation rep =
+ environment->CreateRandomMachineRepresentation();
+ LocationOperand old_slot = LocationOperand::cast(
+ environment->CreateRandomStackSlotOperand(rep, kInput));
+ // Do not pick the same slot twice.
+ if (GetSpillSlot(&old_slot) != spill_slots_.end()) {
+ continue;
+ }
+ LocationOperand new_slot =
+ AllocatedOperand(LocationOperand::STACK_SLOT, rep,
+ frame_.AllocateSpillSlot(GetSlotSizeInBytes(rep)));
+ // Artificially create space on the stack by allocating a new slot.
+ if (extra_stack_space > 0) {
+ frame_.AllocateSpillSlot(extra_stack_space);
+ }
+ spill_slots_.emplace_back(old_slot, new_slot);
+ i++;
}
- spill_slots_.emplace_back(old_slot, new_slot);
- i++;
}
constexpr size_t kMaxUnoptimizedFrameHeight = 0;
@@ -1047,7 +1148,7 @@ class CodeGeneratorTester {
Builtin::kNoBuiltinId, kMaxUnoptimizedFrameHeight,
kMaxPushedArgumentCount);
- generator_->tasm()->CodeEntry();
+ generator_->masm()->CodeEntry();
// Force a frame to be created.
generator_->frame_access_state()->MarkHasFrame(true);
@@ -1139,10 +1240,10 @@ class CodeGeneratorTester {
void CheckAssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- int start = generator_->tasm()->pc_offset();
+ int start = generator_->masm()->pc_offset();
generator_->AssembleMove(MaybeTranslateSlot(source),
MaybeTranslateSlot(destination));
- CHECK(generator_->tasm()->pc_offset() > start);
+ CHECK(generator_->masm()->pc_offset() > start);
}
void CheckAssembleMoves(ParallelMove* moves) {
@@ -1155,15 +1256,15 @@ class CodeGeneratorTester {
void CheckAssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- int start = generator_->tasm()->pc_offset();
+ int start = generator_->masm()->pc_offset();
generator_->AssembleSwap(MaybeTranslateSlot(source),
MaybeTranslateSlot(destination));
- CHECK(generator_->tasm()->pc_offset() > start);
+ CHECK(generator_->masm()->pc_offset() > start);
}
Handle<Code> Finalize() {
generator_->FinishCode();
- generator_->safepoints()->Emit(generator_->tasm(),
+ generator_->safepoints()->Emit(generator_->masm(),
frame_.GetTotalFrameSlotCount());
generator_->MaybeEmitOutOfLineConstantPool();
@@ -1240,7 +1341,7 @@ class CodeGeneratorTester {
//
// And finally, we are able to compare the resulting FixedArray against a
// reference, computed with a simulation of AssembleMove and AssembleSwap. See
-// SimulateMoves and SimulateSwaps.
+// SimulateSequentialMoves, SimulateParallelMoves and SimulateSwaps.
// Allocate space between slots to increase coverage of moves with larger
// ranges. Note that this affects how much stack is allocated when running the
@@ -1258,8 +1359,7 @@ TEST(FuzzAssembleMove) {
Handle<FixedArray> state_in = env.GenerateInitialState();
ParallelMove* moves = env.GenerateRandomMoves(1000, kSequentialMoves);
- Handle<FixedArray> expected =
- env.SimulateMoves(moves, state_in, kSequentialMoves);
+ Handle<FixedArray> expected = env.SimulateSequentialMoves(moves, state_in);
// Test small and potentially large ranges separately.
for (int extra_space : {0, kExtraSpace}) {
@@ -1281,41 +1381,25 @@ TEST(FuzzAssembleMove) {
// Test integration with the gap resolver by resolving parallel moves first.
TEST(FuzzAssembleParallelMove) {
- TestEnvironment env;
+ TestEnvironment env(kChangeLayout);
- // Generate a sequence of N parallel moves of M moves each.
- constexpr int N = 100;
- constexpr int M = 10;
Handle<FixedArray> state_in = env.GenerateInitialState();
- Handle<FixedArray> state_out =
- env.main_isolate()->factory()->NewFixedArray(state_in->length());
- state_in->CopyTo(0, *state_out, 0, state_in->length());
- ParallelMove* moves[N];
- for (int i = 0; i < N; ++i) {
- moves[i] = env.GenerateRandomMoves(M, kParallelMoves);
- state_out = env.SimulateMoves(moves[i], state_out, kParallelMoves);
- }
-
- // Test small and potentially large ranges separately.
- for (int extra_space : {0, kExtraSpace}) {
- CodeGeneratorTester c(&env, extra_space);
+ ParallelMove* moves = env.GenerateRandomParallelMoves();
+ Handle<FixedArray> state_out = env.SimulateParallelMoves(moves, state_in);
- for (int i = 0; i < N; ++i) {
- // The gap resolver modifies the parallel move in-place. Copy and restore
- // it after assembling.
- ParallelMove* save_moves = CopyMoves(moves[i], env.main_zone());
- c.CheckAssembleMoves(moves[i]);
- moves[i] = save_moves;
- }
+ CodeGeneratorTester c(&env);
- Handle<Code> test = c.FinalizeForExecuting();
- if (v8_flags.print_code) {
- test->Print();
- }
+ // The gap resolver modifies the parallel move in-place. Copy and restore
+ // it after assembling.
+ c.CheckAssembleMoves(moves);
- Handle<FixedArray> actual = env.Run(test, state_in);
- env.CheckState(actual, state_out);
+ Handle<Code> test = c.FinalizeForExecuting();
+ if (v8_flags.print_code) {
+ test->Print();
}
+
+ Handle<FixedArray> actual = env.Run(test, state_in);
+ env.CheckState(actual, state_out);
}
TEST(FuzzAssembleSwap) {
@@ -1361,7 +1445,7 @@ TEST(FuzzAssembleMoveAndSwap) {
// Randomly alternate between swaps and moves.
if (env.rng()->NextInt(2) == 0) {
ParallelMove* move = env.GenerateRandomMoves(1, kSequentialMoves);
- expected = env.SimulateMoves(move, expected, kSequentialMoves);
+ expected = env.SimulateSequentialMoves(move, expected);
c.CheckAssembleMove(&move->at(0)->source(),
&move->at(0)->destination());
} else {
@@ -1579,8 +1663,8 @@ TEST(Regress_1171759) {
AssemblerOptions::Default(handles.main_isolate()), m.ExportForTest())
.ToHandleChecked();
- std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
- handles.main_isolate(), code->raw_instruction_size());
+ std::shared_ptr<wasm::NativeModule> module =
+ AllocateNativeModule(handles.main_isolate(), code->InstructionSize());
wasm::WasmCodeRefScope wasm_code_ref_scope;
byte* code_start = module->AddCodeForTesting(code)->instructions().begin();