Diffstat (limited to 'qa/qa/specs/features/api/3_create/gitaly')
-rw-r--r--  qa/qa/specs/features/api/3_create/gitaly/automatic_failover_and_recovery_spec.rb | 21
-rw-r--r--  qa/qa/specs/features/api/3_create/gitaly/praefect_dataloss_spec.rb               | 57
-rw-r--r--  qa/qa/specs/features/api/3_create/gitaly/praefect_replication_queue_spec.rb      |  4
3 files changed, 65 insertions(+), 17 deletions(-)
diff --git a/qa/qa/specs/features/api/3_create/gitaly/automatic_failover_and_recovery_spec.rb b/qa/qa/specs/features/api/3_create/gitaly/automatic_failover_and_recovery_spec.rb
index 6a9be19efdd..55ae0d215cf 100644
--- a/qa/qa/specs/features/api/3_create/gitaly/automatic_failover_and_recovery_spec.rb
+++ b/qa/qa/specs/features/api/3_create/gitaly/automatic_failover_and_recovery_spec.rb
@@ -9,37 +9,30 @@ module QA
project = nil
let(:intial_commit_message) { 'Initial commit' }
- let(:first_added_commit_message) { 'pushed to primary gitaly node' }
- let(:second_added_commit_message) { 'commit to failover node' }
+ let(:first_added_commit_message) { 'first_added_commit_message to primary gitaly node' }
+ let(:second_added_commit_message) { 'second_added_commit_message to failover node' }
before(:context) do
- # Reset the cluster in case previous tests left it in a bad state
praefect_manager.start_all_nodes
project = Resource::Project.fabricate! do |project|
project.name = "gitaly_cluster"
project.initialize_with_readme = true
end
- end
-
- after do
- praefect_manager.start_all_nodes
+ # We need to ensure that the project is replicated to all nodes before proceeding with this test
+ praefect_manager.wait_for_replication(project.id)
end
it 'automatically fails over', testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347830' do
- # Create a new project with a commit and wait for it to replicate
-
- # make sure that our project is published to the 'primary' node
+ # stop other nodes, so we can control which node the commit is sent to
praefect_manager.stop_secondary_node
praefect_manager.stop_tertiary_node
- praefect_manager.wait_for_secondary_node_health_check_failure
- praefect_manager.wait_for_tertiary_node_health_check_failure
Resource::Repository::ProjectPush.fabricate! do |push|
push.project = project
push.commit_message = first_added_commit_message
push.new_branch = false
- push.file_content = "This should exist on all nodes"
+ push.file_content = 'This file was created on gitaly1 while gitaly2/gitaly3 were not running'
end
praefect_manager.start_all_nodes
@@ -56,7 +49,7 @@ module QA
commit.add_files([
{
file_path: "file-#{SecureRandom.hex(8)}",
- content: 'This should exist on one node before reconciliation'
+ content: 'This file was created on gitaly2/gitaly3 while gitaly1 was unavailable'
}
])
end
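
The before(:context) block now ends with praefect_manager.wait_for_replication(project.id), replacing the old after-hook restart as the way to get the cluster into a known state. A minimal sketch of how such a wait helper could be built, assuming a Support::Waiter-style polling utility from the QA framework; the replicated_to_all_storages? predicate is a hypothetical stand-in for whatever query the real manager performs:

module QA
  module Service
    class PraefectManager
      # Poll until Praefect reports the project present on every storage,
      # raising if that does not happen within the allowed duration.
      def wait_for_replication(project_id)
        Support::Waiter.wait_until(max_duration: 60, sleep_interval: 1) do
          replicated_to_all_storages?(project_id) # hypothetical predicate
        end
      end
    end
  end
end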
diff --git a/qa/qa/specs/features/api/3_create/gitaly/praefect_dataloss_spec.rb b/qa/qa/specs/features/api/3_create/gitaly/praefect_dataloss_spec.rb
new file mode 100644
index 00000000000..6e2a34afb3e
--- /dev/null
+++ b/qa/qa/specs/features/api/3_create/gitaly/praefect_dataloss_spec.rb
@@ -0,0 +1,57 @@
+# frozen_string_literal: true
+
+module QA
+ RSpec.describe 'Create' do
+ context 'Praefect dataloss commands', :orchestrated, :gitaly_cluster do
+ let(:praefect_manager) { Service::PraefectManager.new }
+
+ let(:project) do
+ Resource::Project.fabricate! do |project|
+ project.name = 'gitaly_cluster-dataloss-project'
+ project.initialize_with_readme = true
+ end
+ end
+
+ before do
+ praefect_manager.start_all_nodes
+ end
+
+ it 'confirms that changes are synced across all storages', testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/352691' do
+ expect { praefect_manager.praefect_dataloss_information(project.id) }
+ .to(eventually_include('All repositories are fully available on all assigned storages!')
+ .within(max_duration: 60))
+ end
+
+ it 'identifies how many changes are not in sync across storages', testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/352692' do
+ # Ensure our test repository is replicated and in a consistent state prior to test
+ praefect_manager.wait_for_project_synced_across_all_storages(project.id)
+
+ # testing for gitaly2 'out of sync'
+ praefect_manager.stop_secondary_node
+
+ number_of_changes = 3
+ number_of_changes.times do
+ Resource::Repository::Commit.fabricate_via_api! do |commit|
+ commit.project = project
+ commit.branch = "newbranch-#{SecureRandom.hex(8)}"
+ commit.start_branch = project.default_branch
+ commit.commit_message = 'Add new file'
+ commit.add_files([
+ { file_path: "new_file-#{SecureRandom.hex(8)}.txt", content: 'new file' }
+ ])
+ end
+ end
+
+ # testing for gitaly3 'in sync' but marked unhealthy
+ praefect_manager.stop_tertiary_node
+
+ project_data_loss = praefect_manager.praefect_dataloss_information(project.id)
+ aggregate_failures 'validate dataloss identified' do
+ expect(project_data_loss).to include('gitaly1, assigned host')
+ expect(project_data_loss).to include("gitaly2 is behind by #{number_of_changes} changes or less, assigned host, unhealthy")
+ expect(project_data_loss).to include('gitaly3, assigned host, unhealthy')
+ end
+ end
+ end
+ end
+end
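
praefect_manager.praefect_dataloss_information is the workhorse of this new spec. Praefect ships a dataloss subcommand that reports, per repository, which storages are up to date, assigned, or unhealthy. A plausible sketch of the helper; the container name, config path, and return behaviour of shell are assumptions, not facts from this diff:

# Sketch only: shells into the Praefect node, runs the dataloss subcommand,
# and returns its report. Assumes a `shell` helper that returns stdout.
def praefect_dataloss_information(project_id)
  output = shell(
    'docker exec praefect praefect ' \
    '-config /var/opt/gitlab/praefect/config.toml ' \
    'dataloss -virtual-storage default'
  )
  # The real helper presumably narrows the report to the repository
  # belonging to project_id; here we return the full output.
  output
end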
diff --git a/qa/qa/specs/features/api/3_create/gitaly/praefect_replication_queue_spec.rb b/qa/qa/specs/features/api/3_create/gitaly/praefect_replication_queue_spec.rb
index e7e23124312..d066953d12e 100644
--- a/qa/qa/specs/features/api/3_create/gitaly/praefect_replication_queue_spec.rb
+++ b/qa/qa/specs/features/api/3_create/gitaly/praefect_replication_queue_spec.rb
@@ -4,7 +4,7 @@ require 'parallel'
module QA
RSpec.describe 'Create' do
- context 'Gitaly Cluster replication queue', :orchestrated, :gitaly_cluster, :skip_live_env, quarantine: { issue: 'https://gitlab.com/gitlab-org/gitlab/-/issues/346453', type: :flaky } do
+ context 'Gitaly Cluster replication queue', :orchestrated, :gitaly_cluster, :skip_live_env do
let(:praefect_manager) { Service::PraefectManager.new }
let(:project) do
Resource::Project.fabricate! do |project|
@@ -15,12 +15,10 @@ module QA
before do
praefect_manager.start_all_nodes
- praefect_manager.start_praefect
end
after do
praefect_manager.start_all_nodes
- praefect_manager.start_praefect
praefect_manager.clear_replication_queue
end