summaryrefslogtreecommitdiff
path: root/spec/workers
diff options
context:
space:
mode:
Diffstat (limited to 'spec/workers')
-rw-r--r--spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb2
-rw-r--r--spec/workers/background_migration_worker_spec.rb100
-rw-r--r--spec/workers/build_finished_worker_spec.rb42
-rw-r--r--spec/workers/bulk_import_worker_spec.rb16
-rw-r--r--spec/workers/ci/delete_objects_worker_spec.rb20
-rw-r--r--spec/workers/concerns/application_worker_spec.rb4
-rw-r--r--spec/workers/concerns/gitlab/github_import/rescheduling_methods_spec.rb2
-rw-r--r--spec/workers/concerns/gitlab/github_import/stage_methods_spec.rb2
-rw-r--r--spec/workers/concerns/limited_capacity/worker_spec.rb3
-rw-r--r--spec/workers/concerns/reenqueuer_spec.rb15
-rw-r--r--spec/workers/container_expiration_policies/cleanup_container_repository_worker_spec.rb234
-rw-r--r--spec/workers/container_expiration_policy_worker_spec.rb160
-rw-r--r--spec/workers/destroy_pages_deployments_worker_spec.rb38
-rw-r--r--spec/workers/git_garbage_collect_worker_spec.rb14
-rw-r--r--spec/workers/jira_connect/sync_branch_worker_spec.rb33
-rw-r--r--spec/workers/jira_connect/sync_merge_request_worker_spec.rb33
-rw-r--r--spec/workers/jira_connect/sync_project_worker_spec.rb77
-rw-r--r--spec/workers/propagate_integration_inherit_descendant_worker_spec.rb30
-rw-r--r--spec/workers/propagate_integration_inherit_worker_spec.rb2
-rw-r--r--spec/workers/purge_dependency_proxy_cache_worker_spec.rb58
-rw-r--r--spec/workers/remove_expired_members_worker_spec.rb44
-rw-r--r--spec/workers/repository_cleanup_worker_spec.rb10
-rw-r--r--spec/workers/schedule_merge_request_cleanup_refs_worker_spec.rb39
23 files changed, 860 insertions, 118 deletions
diff --git a/spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb b/spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb
index ff692d0eda6..c7de8553d86 100644
--- a/spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb
+++ b/spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb
@@ -6,7 +6,7 @@ RSpec.describe Analytics::InstanceStatistics::CountJobTriggerWorker do
it_behaves_like 'an idempotent worker'
context 'triggers a job for each measurement identifiers' do
- let(:expected_count) { Analytics::InstanceStatistics::Measurement.identifiers.size }
+ let(:expected_count) { Analytics::InstanceStatistics::Measurement.identifier_query_mapping.keys.size }
it 'triggers CounterJobWorker jobs' do
subject.perform
diff --git a/spec/workers/background_migration_worker_spec.rb b/spec/workers/background_migration_worker_spec.rb
index 15e93d62c7d..8094efcaf04 100644
--- a/spec/workers/background_migration_worker_spec.rb
+++ b/spec/workers/background_migration_worker_spec.rb
@@ -12,45 +12,91 @@ RSpec.describe BackgroundMigrationWorker, :clean_gitlab_redis_shared_state do
end
describe '#perform' do
- it 'performs a background migration' do
- expect(Gitlab::BackgroundMigration)
- .to receive(:perform)
- .with('Foo', [10, 20])
+ before do
+ allow(worker).to receive(:jid).and_return(1)
+ expect(worker).to receive(:always_perform?).and_return(false)
+ end
- worker.perform('Foo', [10, 20])
+ context 'when lease can be obtained' do
+ before do
+ expect(Gitlab::BackgroundMigration)
+ .to receive(:perform)
+ .with('Foo', [10, 20])
+ end
+
+ it 'performs a background migration' do
+ worker.perform('Foo', [10, 20])
+ end
+
+ context 'when lease_attempts is 1' do
+ it 'performs a background migration' do
+ worker.perform('Foo', [10, 20], 1)
+ end
+ end
end
- it 'reschedules a migration if it was performed recently' do
- expect(worker)
- .to receive(:always_perform?)
- .and_return(false)
+ context 'when lease not obtained (migration of same class was performed recently)' do
+ before do
+ expect(Gitlab::BackgroundMigration).not_to receive(:perform)
+
+ worker.lease_for('Foo').try_obtain
+ end
- worker.lease_for('Foo').try_obtain
+ it 'reschedules the migration and decrements the lease_attempts' do
+ expect(described_class)
+ .to receive(:perform_in)
+ .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
- expect(Gitlab::BackgroundMigration)
- .not_to receive(:perform)
+ worker.perform('Foo', [10, 20], 5)
+ end
- expect(described_class)
- .to receive(:perform_in)
- .with(a_kind_of(Numeric), 'Foo', [10, 20])
+ context 'when lease_attempts is 1' do
+ it 'reschedules the migration and decrements the lease_attempts' do
+ expect(described_class)
+ .to receive(:perform_in)
+ .with(a_kind_of(Numeric), 'Foo', [10, 20], 0)
- worker.perform('Foo', [10, 20])
+ worker.perform('Foo', [10, 20], 1)
+ end
+ end
+
+ context 'when lease_attempts is 0' do
+ it 'gives up performing the migration' do
+ expect(described_class).not_to receive(:perform_in)
+ expect(Sidekiq.logger).to receive(:warn).with(
+ class: 'Foo',
+ message: 'Job could not get an exclusive lease after several tries. Giving up.',
+ job_id: 1)
+
+ worker.perform('Foo', [10, 20], 0)
+ end
+ end
end
- it 'reschedules a migration if the database is not healthy' do
- allow(worker)
- .to receive(:always_perform?)
- .and_return(false)
+ context 'when database is not healthy' do
+ before do
+ allow(worker).to receive(:healthy_database?).and_return(false)
+ end
- allow(worker)
- .to receive(:healthy_database?)
- .and_return(false)
+ it 'reschedules a migration if the database is not healthy' do
+ expect(described_class)
+ .to receive(:perform_in)
+ .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
- expect(described_class)
- .to receive(:perform_in)
- .with(a_kind_of(Numeric), 'Foo', [10, 20])
+ worker.perform('Foo', [10, 20])
+ end
- worker.perform('Foo', [10, 20])
+ context 'when lease_attempts is 0' do
+ it 'gives up performing the migration' do
+ expect(described_class).not_to receive(:perform_in)
+ expect(Sidekiq.logger).to receive(:warn).with(
+ class: 'Foo',
+ message: 'Database was unhealthy after several tries. Giving up.',
+ job_id: 1)
+
+ worker.perform('Foo', [10, 20], 0)
+ end
+ end
end
it 'sets the class that will be executed as the caller_id' do
diff --git a/spec/workers/build_finished_worker_spec.rb b/spec/workers/build_finished_worker_spec.rb
index 11b50961e9e..b0058c76e27 100644
--- a/spec/workers/build_finished_worker_spec.rb
+++ b/spec/workers/build_finished_worker_spec.rb
@@ -11,18 +11,28 @@ RSpec.describe BuildFinishedWorker do
context 'when build exists' do
let!(:build) { create(:ci_build) }
- it 'calculates coverage and calls hooks' do
- expect(BuildTraceSectionsWorker)
- .to receive(:new).ordered.and_call_original
- expect(BuildCoverageWorker)
- .to receive(:new).ordered.and_call_original
-
- expect_any_instance_of(BuildTraceSectionsWorker).to receive(:perform)
- expect_any_instance_of(BuildCoverageWorker).to receive(:perform)
+ it 'calculates coverage and calls hooks', :aggregate_failures do
+ trace_worker = double('trace worker')
+ coverage_worker = double('coverage worker')
+
+ allow(BuildTraceSectionsWorker).to receive(:new).and_return(trace_worker)
+ allow(BuildCoverageWorker).to receive(:new).and_return(coverage_worker)
+
+ # Unfortunately, `ordered` does not seem to work when called within `allow_next_instance_of`
+ # so we're doing this the long and dirty way
+ expect(trace_worker).to receive(:perform).ordered
+ expect(coverage_worker).to receive(:perform).ordered
+
+ expect_next_instance_of(Ci::BuildReportResultWorker) do |instance|
+ expect(instance).to receive(:perform)
+ end
+ expect_next_instance_of(Ci::TestCasesService) do |instance|
+ expect(instance).to receive(:execute)
+ end
+
expect(BuildHooksWorker).to receive(:perform_async)
expect(ExpirePipelineCacheWorker).to receive(:perform_async)
expect(ChatNotificationWorker).not_to receive(:perform_async)
- expect(Ci::BuildReportResultWorker).not_to receive(:perform)
expect(ArchiveTraceWorker).to receive(:perform_in)
subject
@@ -31,7 +41,7 @@ RSpec.describe BuildFinishedWorker do
context 'when build does not exist' do
it 'does not raise exception' do
- expect { described_class.new.perform(123) }
+ expect { described_class.new.perform(non_existing_record_id) }
.not_to raise_error
end
end
@@ -45,17 +55,5 @@ RSpec.describe BuildFinishedWorker do
subject
end
end
-
- context 'when build has a test report' do
- let(:build) { create(:ci_build, :test_reports) }
-
- it 'schedules a BuildReportResult job' do
- expect_next_instance_of(Ci::BuildReportResultWorker) do |worker|
- expect(worker).to receive(:perform).with(build.id)
- end
-
- subject
- end
- end
end
end
diff --git a/spec/workers/bulk_import_worker_spec.rb b/spec/workers/bulk_import_worker_spec.rb
new file mode 100644
index 00000000000..12783f40528
--- /dev/null
+++ b/spec/workers/bulk_import_worker_spec.rb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe BulkImportWorker do
+ describe '#perform' do
+ it 'executes Group Importer' do
+ bulk_import_id = 1
+
+ expect(BulkImports::Importers::GroupsImporter)
+ .to receive(:new).with(bulk_import_id).and_return(double(execute: true))
+
+ described_class.new.perform(bulk_import_id)
+ end
+ end
+end
diff --git a/spec/workers/ci/delete_objects_worker_spec.rb b/spec/workers/ci/delete_objects_worker_spec.rb
index 6cb8e0cba37..52d90d7667a 100644
--- a/spec/workers/ci/delete_objects_worker_spec.rb
+++ b/spec/workers/ci/delete_objects_worker_spec.rb
@@ -9,9 +9,14 @@ RSpec.describe Ci::DeleteObjectsWorker do
describe '#perform' do
it 'executes a service' do
+ allow(worker).to receive(:max_running_jobs).and_return(25)
+
expect_next_instance_of(Ci::DeleteObjectsService) do |instance|
expect(instance).to receive(:execute)
- expect(instance).to receive(:remaining_batches_count).once.and_call_original
+ expect(instance).to receive(:remaining_batches_count)
+ .with(max_batch_count: 25)
+ .once
+ .and_call_original
end
worker.perform
@@ -23,7 +28,6 @@ RSpec.describe Ci::DeleteObjectsWorker do
before do
stub_feature_flags(
- ci_delete_objects_low_concurrency: low,
ci_delete_objects_medium_concurrency: medium,
ci_delete_objects_high_concurrency: high
)
@@ -31,13 +35,11 @@ RSpec.describe Ci::DeleteObjectsWorker do
subject(:max_running_jobs) { worker.max_running_jobs }
- where(:low, :medium, :high, :expected) do
- false | false | false | 0
- true | true | true | 2
- true | false | false | 2
- false | true | false | 20
- false | true | true | 20
- false | false | true | 50
+ where(:medium, :high, :expected) do
+ false | false | 2
+ true | false | 20
+ true | true | 20
+ false | true | 50
end
with_them do
diff --git a/spec/workers/concerns/application_worker_spec.rb b/spec/workers/concerns/application_worker_spec.rb
index a18b83f199b..07e11f014c3 100644
--- a/spec/workers/concerns/application_worker_spec.rb
+++ b/spec/workers/concerns/application_worker_spec.rb
@@ -45,7 +45,7 @@ RSpec.describe ApplicationWorker do
instance.jid = 'a jid'
expect(result).to include(
- 'class' => worker.class,
+ 'class' => instance.class.name,
'job_status' => 'running',
'queue' => worker.queue,
'jid' => instance.jid
@@ -69,7 +69,7 @@ RSpec.describe ApplicationWorker do
it 'does not override predefined context keys with custom payload' do
payload['class'] = 'custom value'
- expect(result).to include('class' => worker.class)
+ expect(result).to include('class' => instance.class.name)
end
end
diff --git a/spec/workers/concerns/gitlab/github_import/rescheduling_methods_spec.rb b/spec/workers/concerns/gitlab/github_import/rescheduling_methods_spec.rb
index 09d64fe50bd..8727756ce50 100644
--- a/spec/workers/concerns/gitlab/github_import/rescheduling_methods_spec.rb
+++ b/spec/workers/concerns/gitlab/github_import/rescheduling_methods_spec.rb
@@ -26,7 +26,7 @@ RSpec.describe Gitlab::GithubImport::ReschedulingMethods do
end
context 'with an existing project' do
- let(:project) { create(:project) }
+ let(:project) { create(:project, import_url: 'https://t0ken@github.com/repo/repo.git') }
it 'notifies any waiters upon successfully importing the data' do
expect(worker)
diff --git a/spec/workers/concerns/gitlab/github_import/stage_methods_spec.rb b/spec/workers/concerns/gitlab/github_import/stage_methods_spec.rb
index b7635748498..03e875bcb87 100644
--- a/spec/workers/concerns/gitlab/github_import/stage_methods_spec.rb
+++ b/spec/workers/concerns/gitlab/github_import/stage_methods_spec.rb
@@ -9,6 +9,8 @@ RSpec.describe Gitlab::GithubImport::StageMethods do
end
describe '#perform' do
+ let(:project) { create(:project, import_url: 'https://t0ken@github.com/repo/repo.git') }
+
it 'returns if no project could be found' do
expect(worker).not_to receive(:try_import)
diff --git a/spec/workers/concerns/limited_capacity/worker_spec.rb b/spec/workers/concerns/limited_capacity/worker_spec.rb
index 8a15675c04d..2c33c8666ec 100644
--- a/spec/workers/concerns/limited_capacity/worker_spec.rb
+++ b/spec/workers/concerns/limited_capacity/worker_spec.rb
@@ -121,7 +121,8 @@ RSpec.describe LimitedCapacity::Worker, :clean_gitlab_redis_queues, :aggregate_f
it 'reports prometheus metrics' do
allow(worker).to receive(:perform_work)
- expect(worker).to receive(:report_prometheus_metrics)
+ expect(worker).to receive(:report_prometheus_metrics).once.and_call_original
+ expect(worker).to receive(:report_running_jobs_metrics).twice.and_call_original
perform
end
diff --git a/spec/workers/concerns/reenqueuer_spec.rb b/spec/workers/concerns/reenqueuer_spec.rb
index df0724045c1..ab44042834f 100644
--- a/spec/workers/concerns/reenqueuer_spec.rb
+++ b/spec/workers/concerns/reenqueuer_spec.rb
@@ -40,9 +40,7 @@ RSpec.describe Reenqueuer do
it_behaves_like 'reenqueuer'
- it_behaves_like 'it is rate limited to 1 call per', 5.seconds do
- let(:rate_limited_method) { subject.perform }
- end
+ it_behaves_like '#perform is rate limited to 1 call per', 5.seconds
it 'disables Sidekiq retries' do
expect(job.sidekiq_options_hash).to include('retry' => false)
@@ -98,7 +96,7 @@ RSpec.describe Reenqueuer::ReenqueuerSleeper do
Class.new do
include Reenqueuer::ReenqueuerSleeper
- def rate_limited_method
+ def perform
ensure_minimum_duration(11.seconds) do
# do work
end
@@ -108,12 +106,11 @@ RSpec.describe Reenqueuer::ReenqueuerSleeper do
subject(:dummy) { dummy_class.new }
- # Test that rate_limited_method is rate limited by ensure_minimum_duration
- it_behaves_like 'it is rate limited to 1 call per', 11.seconds do
- let(:rate_limited_method) { dummy.rate_limited_method }
- end
+ # Slightly higher-level test of ensure_minimum_duration since we conveniently
+ # already have this shared example anyway.
+ it_behaves_like '#perform is rate limited to 1 call per', 11.seconds
- # Test ensure_minimum_duration more directly
+ # Unit test ensure_minimum_duration
describe '#ensure_minimum_duration' do
around do |example|
# Allow Timecop.travel without the block form
diff --git a/spec/workers/container_expiration_policies/cleanup_container_repository_worker_spec.rb b/spec/workers/container_expiration_policies/cleanup_container_repository_worker_spec.rb
new file mode 100644
index 00000000000..d98ea1b6ab2
--- /dev/null
+++ b/spec/workers/container_expiration_policies/cleanup_container_repository_worker_spec.rb
@@ -0,0 +1,234 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe ContainerExpirationPolicies::CleanupContainerRepositoryWorker do
+ let_it_be(:repository, reload: true) { create(:container_repository, :cleanup_scheduled) }
+ let_it_be(:project) { repository.project }
+ let_it_be(:policy) { project.container_expiration_policy }
+ let_it_be(:other_repository) { create(:container_repository) }
+
+ let(:worker) { described_class.new }
+
+ describe '#perform_work' do
+ subject { worker.perform_work }
+
+ before do
+ policy.update_column(:enabled, true)
+ end
+
+ RSpec.shared_examples 'handling all repository conditions' do
+ it 'sends the repository for cleaning' do
+ expect(ContainerExpirationPolicies::CleanupService)
+ .to receive(:new).with(repository).and_return(double(execute: cleanup_service_response(repository: repository)))
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:cleanup_status, :finished)
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:container_repository_id, repository.id)
+
+ subject
+ end
+
+ context 'with unfinished cleanup' do
+ it 'logs an unfinished cleanup' do
+ expect(ContainerExpirationPolicies::CleanupService)
+ .to receive(:new).with(repository).and_return(double(execute: cleanup_service_response(status: :unfinished, repository: repository)))
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:cleanup_status, :unfinished)
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:container_repository_id, repository.id)
+
+ subject
+ end
+ end
+
+ context 'with policy running shortly' do
+ before do
+ repository.project
+ .container_expiration_policy
+ .update_column(:next_run_at, 1.minute.from_now)
+ end
+
+ it 'skips the repository' do
+ expect(ContainerExpirationPolicies::CleanupService).not_to receive(:new)
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:container_repository_id, repository.id)
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:cleanup_status, :skipped)
+
+ expect { subject }.to change { ContainerRepository.waiting_for_cleanup.count }.from(1).to(0)
+ expect(repository.reload.cleanup_unscheduled?).to be_truthy
+ end
+ end
+
+ context 'with disabled policy' do
+ before do
+ repository.project
+ .container_expiration_policy
+ .disable!
+ end
+
+ it 'skips the repository' do
+ expect(ContainerExpirationPolicies::CleanupService).not_to receive(:new)
+
+ expect { subject }.to change { ContainerRepository.waiting_for_cleanup.count }.from(1).to(0)
+ expect(repository.reload.cleanup_unscheduled?).to be_truthy
+ end
+ end
+ end
+
+ context 'with repository in cleanup scheduled state' do
+ it_behaves_like 'handling all repository conditions'
+ end
+
+ context 'with repository in cleanup unfinished state' do
+ before do
+ repository.cleanup_unfinished!
+ end
+
+ it_behaves_like 'handling all repository conditions'
+ end
+
+ context 'with another repository in cleanup unfinished state' do
+ let_it_be(:another_repository) { create(:container_repository, :cleanup_unfinished) }
+
+ it 'process the cleanup scheduled repository first' do
+ expect(ContainerExpirationPolicies::CleanupService)
+ .to receive(:new).with(repository).and_return(double(execute: cleanup_service_response(repository: repository)))
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:cleanup_status, :finished)
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:container_repository_id, repository.id)
+
+ subject
+ end
+ end
+
+ context 'with multiple repositories in cleanup unfinished state' do
+ let_it_be(:repository2) { create(:container_repository, :cleanup_unfinished, expiration_policy_started_at: 20.minutes.ago) }
+ let_it_be(:repository3) { create(:container_repository, :cleanup_unfinished, expiration_policy_started_at: 10.minutes.ago) }
+
+ before do
+ repository.update!(expiration_policy_cleanup_status: :cleanup_unfinished, expiration_policy_started_at: 30.minutes.ago)
+ end
+
+ it 'process the repository with the oldest expiration_policy_started_at' do
+ expect(ContainerExpirationPolicies::CleanupService)
+ .to receive(:new).with(repository).and_return(double(execute: cleanup_service_response(repository: repository)))
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:cleanup_status, :finished)
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:container_repository_id, repository.id)
+
+ subject
+ end
+ end
+
+ context 'with repository in cleanup ongoing state' do
+ before do
+ repository.cleanup_ongoing!
+ end
+
+ it 'does not process it' do
+ expect(Projects::ContainerRepository::CleanupTagsService).not_to receive(:new)
+
+ expect { subject }.not_to change { ContainerRepository.waiting_for_cleanup.count }
+ expect(repository.cleanup_ongoing?).to be_truthy
+ end
+ end
+
+ context 'with no repository in any cleanup state' do
+ before do
+ repository.cleanup_unscheduled!
+ end
+
+ it 'does not process it' do
+ expect(Projects::ContainerRepository::CleanupTagsService).not_to receive(:new)
+
+ expect { subject }.not_to change { ContainerRepository.waiting_for_cleanup.count }
+ expect(repository.cleanup_unscheduled?).to be_truthy
+ end
+ end
+
+ context 'with no container repository waiting' do
+ before do
+ repository.destroy!
+ end
+
+ it 'does not execute the cleanup tags service' do
+ expect(Projects::ContainerRepository::CleanupTagsService).not_to receive(:new)
+
+ expect { subject }.not_to change { ContainerRepository.waiting_for_cleanup.count }
+ end
+ end
+
+ context 'with feature flag disabled' do
+ before do
+ stub_feature_flags(container_registry_expiration_policies_throttling: false)
+ end
+
+ it 'is a no-op' do
+ expect(Projects::ContainerRepository::CleanupTagsService).not_to receive(:new)
+
+ expect { subject }.not_to change { ContainerRepository.waiting_for_cleanup.count }
+ end
+ end
+
+ def cleanup_service_response(status: :finished, repository:)
+ ServiceResponse.success(message: "cleanup #{status}", payload: { cleanup_status: status, container_repository_id: repository.id })
+ end
+ end
+
+ describe '#remaining_work_count' do
+ subject { worker.remaining_work_count }
+
+ context 'with container repositoires waiting for cleanup' do
+ let_it_be(:unfinished_repositories) { create_list(:container_repository, 2, :cleanup_unfinished) }
+
+ it { is_expected.to eq(3) }
+
+ it 'logs the work count' do
+ expect_log_info(
+ cleanup_scheduled_count: 1,
+ cleanup_unfinished_count: 2,
+ cleanup_total_count: 3
+ )
+
+ subject
+ end
+ end
+
+ context 'with no container repositories waiting for cleanup' do
+ before do
+ repository.cleanup_ongoing!
+ end
+
+ it { is_expected.to eq(0) }
+
+ it 'logs 0 work count' do
+ expect_log_info(
+ cleanup_scheduled_count: 0,
+ cleanup_unfinished_count: 0,
+ cleanup_total_count: 0
+ )
+
+ subject
+ end
+ end
+ end
+
+ describe '#max_running_jobs' do
+ let(:capacity) { 50 }
+
+ subject { worker.max_running_jobs }
+
+ before do
+ stub_application_setting(container_registry_expiration_policies_worker_capacity: capacity)
+ end
+
+ it { is_expected.to eq(capacity) }
+
+ context 'with feature flag disabled' do
+ before do
+ stub_feature_flags(container_registry_expiration_policies_throttling: false)
+ end
+
+ it { is_expected.to eq(0) }
+ end
+ end
+
+ def expect_log_info(structure)
+ expect(worker.logger)
+ .to receive(:info).with(worker.structured_payload(structure))
+ end
+end
diff --git a/spec/workers/container_expiration_policy_worker_spec.rb b/spec/workers/container_expiration_policy_worker_spec.rb
index 6b185c30670..d9a4f6396f8 100644
--- a/spec/workers/container_expiration_policy_worker_spec.rb
+++ b/spec/workers/container_expiration_policy_worker_spec.rb
@@ -5,71 +5,151 @@ require 'spec_helper'
RSpec.describe ContainerExpirationPolicyWorker do
include ExclusiveLeaseHelpers
- subject { described_class.new.perform }
+ let(:worker) { described_class.new }
+ let(:started_at) { nil }
- RSpec.shared_examples 'not executing any policy' do
- it 'does not run any policy' do
- expect(ContainerExpirationPolicyService).not_to receive(:new)
+ describe '#perform' do
+ subject { worker.perform }
- subject
+ RSpec.shared_examples 'not executing any policy' do
+ it 'does not run any policy' do
+ expect(ContainerExpirationPolicyService).not_to receive(:new)
+
+ subject
+ end
end
- end
- context 'With no container expiration policies' do
- it_behaves_like 'not executing any policy'
- end
+ context 'With no container expiration policies' do
+ it 'does not execute any policies' do
+ expect(ContainerRepository).not_to receive(:for_project_id)
- context 'With container expiration policies' do
- let_it_be(:container_expiration_policy, reload: true) { create(:container_expiration_policy, :runnable) }
- let_it_be(:container_repository) { create(:container_repository, project: container_expiration_policy.project) }
- let_it_be(:user) { container_expiration_policy.project.owner }
+ expect { subject }.not_to change { ContainerRepository.cleanup_scheduled.count }
+ end
+ end
- context 'a valid policy' do
- it 'runs the policy' do
- service = instance_double(ContainerExpirationPolicyService, execute: true)
+ context 'with container expiration policies' do
+ let_it_be(:container_expiration_policy) { create(:container_expiration_policy, :runnable) }
+ let_it_be(:container_repository) { create(:container_repository, project: container_expiration_policy.project) }
- expect(ContainerExpirationPolicyService)
- .to receive(:new).with(container_expiration_policy.project, user).and_return(service)
+ context 'with a valid container expiration policy' do
+ it 'schedules the next run' do
+ expect { subject }.to change { container_expiration_policy.reload.next_run_at }
+ end
- subject
- end
- end
+ it 'marks the container repository as scheduled for cleanup' do
+ expect { subject }.to change { container_repository.reload.cleanup_scheduled? }.from(false).to(true)
+ expect(ContainerRepository.cleanup_scheduled.count).to eq(1)
+ end
- context 'a disabled policy' do
- before do
- container_expiration_policy.disable!
+ it 'calls the limited capacity worker' do
+ expect(ContainerExpirationPolicies::CleanupContainerRepositoryWorker).to receive(:perform_with_capacity)
+
+ subject
+ end
end
- it_behaves_like 'not executing any policy'
- end
+ context 'with a disabled container expiration policy' do
+ before do
+ container_expiration_policy.disable!
+ end
- context 'a policy that is not due for a run' do
- before do
- container_expiration_policy.update_column(:next_run_at, 2.minutes.from_now)
+ it 'does not run the policy' do
+ expect(ContainerRepository).not_to receive(:for_project_id)
+
+ expect { subject }.not_to change { ContainerRepository.cleanup_scheduled.count }
+ end
end
- it_behaves_like 'not executing any policy'
+ context 'with an invalid container expiration policy' do
+ let(:user) { container_expiration_policy.project.owner }
+
+ before do
+ container_expiration_policy.update_column(:name_regex, '*production')
+ end
+
+ it 'disables the policy and tracks an error' do
+ expect(ContainerRepository).not_to receive(:for_project_id)
+ expect(Gitlab::ErrorTracking).to receive(:log_exception).with(instance_of(described_class::InvalidPolicyError), container_expiration_policy_id: container_expiration_policy.id)
+
+ expect { subject }.to change { container_expiration_policy.reload.enabled }.from(true).to(false)
+ expect(ContainerRepository.cleanup_scheduled).to be_empty
+ end
+ end
end
- context 'a policy linked to no container repository' do
+ context 'with exclusive lease taken' do
before do
- container_expiration_policy.container_repositories.delete_all
+ stub_exclusive_lease_taken(worker.lease_key, timeout: 5.hours)
end
- it_behaves_like 'not executing any policy'
+ it 'does not execute any policy' do
+ expect(ContainerExpirationPolicies::CleanupContainerRepositoryWorker).not_to receive(:perform_with_capacity)
+ expect(worker).not_to receive(:runnable_policies)
+
+ expect { subject }.not_to change { ContainerRepository.cleanup_scheduled.count }
+ end
end
- context 'an invalid policy' do
+ context 'with throttling disabled' do
before do
- container_expiration_policy.update_column(:name_regex, '*production')
+ stub_feature_flags(container_registry_expiration_policies_throttling: false)
end
- it 'runs the policy and tracks an error' do
- expect(ContainerExpirationPolicyService)
- .to receive(:new).with(container_expiration_policy.project, user).and_call_original
- expect(Gitlab::ErrorTracking).to receive(:log_exception).with(instance_of(ContainerExpirationPolicyService::InvalidPolicyError), container_expiration_policy_id: container_expiration_policy.id)
+ context 'with no container expiration policies' do
+ it_behaves_like 'not executing any policy'
+ end
- expect { subject }.to change { container_expiration_policy.reload.enabled }.from(true).to(false)
+ context 'with container expiration policies' do
+ let_it_be(:container_expiration_policy, reload: true) { create(:container_expiration_policy, :runnable) }
+ let_it_be(:container_repository) { create(:container_repository, project: container_expiration_policy.project) }
+ let_it_be(:user) { container_expiration_policy.project.owner }
+
+ context 'a valid policy' do
+ it 'runs the policy' do
+ expect(ContainerExpirationPolicyService)
+ .to receive(:new).with(container_expiration_policy.project, user).and_call_original
+ expect(CleanupContainerRepositoryWorker).to receive(:perform_async).once.and_call_original
+
+ expect { subject }.not_to raise_error
+ end
+ end
+
+ context 'a disabled policy' do
+ before do
+ container_expiration_policy.disable!
+ end
+
+ it_behaves_like 'not executing any policy'
+ end
+
+ context 'a policy that is not due for a run' do
+ before do
+ container_expiration_policy.update_column(:next_run_at, 2.minutes.from_now)
+ end
+
+ it_behaves_like 'not executing any policy'
+ end
+
+ context 'a policy linked to no container repository' do
+ before do
+ container_expiration_policy.container_repositories.delete_all
+ end
+
+ it_behaves_like 'not executing any policy'
+ end
+
+ context 'an invalid policy' do
+ before do
+ container_expiration_policy.update_column(:name_regex, '*production')
+ end
+
+ it 'disables the policy and tracks an error' do
+ expect(ContainerExpirationPolicyService).not_to receive(:new).with(container_expiration_policy, user)
+ expect(Gitlab::ErrorTracking).to receive(:log_exception).with(instance_of(described_class::InvalidPolicyError), container_expiration_policy_id: container_expiration_policy.id)
+
+ expect { subject }.to change { container_expiration_policy.reload.enabled }.from(true).to(false)
+ end
+ end
end
end
end
diff --git a/spec/workers/destroy_pages_deployments_worker_spec.rb b/spec/workers/destroy_pages_deployments_worker_spec.rb
new file mode 100644
index 00000000000..2c20c9004ef
--- /dev/null
+++ b/spec/workers/destroy_pages_deployments_worker_spec.rb
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe DestroyPagesDeploymentsWorker do
+ subject(:worker) { described_class.new }
+
+ let(:project) { create(:project) }
+ let!(:old_deployment) { create(:pages_deployment, project: project) }
+ let!(:last_deployment) { create(:pages_deployment, project: project) }
+ let!(:another_deployment) { create(:pages_deployment) }
+
+ it "doesn't fail if project is already removed" do
+ expect do
+ worker.perform(-1)
+ end.not_to raise_error
+ end
+
+ it 'can be called without last_deployment_id' do
+ expect_next_instance_of(::Pages::DestroyDeploymentsService, project, nil) do |service|
+ expect(service).to receive(:execute).and_call_original
+ end
+
+ expect do
+ worker.perform(project.id)
+ end.to change { PagesDeployment.count }.by(-2)
+ end
+
+ it 'calls destroy service' do
+ expect_next_instance_of(::Pages::DestroyDeploymentsService, project, last_deployment.id) do |service|
+ expect(service).to receive(:execute).and_call_original
+ end
+
+ expect do
+ worker.perform(project.id, last_deployment.id)
+ end.to change { PagesDeployment.count }.by(-1)
+ end
+end
diff --git a/spec/workers/git_garbage_collect_worker_spec.rb b/spec/workers/git_garbage_collect_worker_spec.rb
index fc9115a5ea1..13089549086 100644
--- a/spec/workers/git_garbage_collect_worker_spec.rb
+++ b/spec/workers/git_garbage_collect_worker_spec.rb
@@ -272,6 +272,11 @@ RSpec.describe GitGarbageCollectWorker do
expect(before_packs.count).to be >= 1
+ expect_any_instance_of(Gitlab::GitalyClient::RepositoryService)
+ .to receive(:garbage_collect)
+ .with(bitmaps_enabled, prune: false)
+ .and_call_original
+
subject.perform(project.id, 'gc', lease_key, lease_uuid)
after_packed_refs = packed_refs(project)
after_packs = packs(project)
@@ -292,6 +297,15 @@ RSpec.describe GitGarbageCollectWorker do
subject.perform(project.id, 'gc', lease_key, lease_uuid)
end
+
+ it 'prune calls garbage_collect with the option prune: true' do
+ expect_any_instance_of(Gitlab::GitalyClient::RepositoryService)
+ .to receive(:garbage_collect)
+ .with(bitmaps_enabled, prune: true)
+ .and_return(nil)
+
+ subject.perform(project.id, 'prune', lease_key, lease_uuid)
+ end
end
context 'with bitmaps enabled' do
diff --git a/spec/workers/jira_connect/sync_branch_worker_spec.rb b/spec/workers/jira_connect/sync_branch_worker_spec.rb
index 2da3ea9d256..4aa2f89de7b 100644
--- a/spec/workers/jira_connect/sync_branch_worker_spec.rb
+++ b/spec/workers/jira_connect/sync_branch_worker_spec.rb
@@ -4,7 +4,10 @@ require 'spec_helper'
RSpec.describe JiraConnect::SyncBranchWorker do
describe '#perform' do
- let_it_be(:project) { create(:project, :repository) }
+ let_it_be(:group) { create(:group) }
+ let_it_be(:project) { create(:project, :repository, group: group) }
+ let_it_be(:subscription) { create(:jira_connect_subscription, installation: create(:jira_connect_installation), namespace: group) }
+
let(:project_id) { project.id }
let(:branch_name) { 'master' }
let(:commit_shas) { %w(b83d6e3 5a62481) }
@@ -13,7 +16,7 @@ RSpec.describe JiraConnect::SyncBranchWorker do
def expect_jira_sync_service_execute(args)
expect_next_instance_of(JiraConnect::SyncService) do |instance|
- expect(instance).to receive(:execute).with(args)
+ expect(instance).to receive(:execute).with(args.merge(update_sequence_id: nil))
end
end
@@ -61,5 +64,31 @@ RSpec.describe JiraConnect::SyncBranchWorker do
subject
end
end
+
+ context 'with update_sequence_id' do
+ let(:update_sequence_id) { 1 }
+ let(:request_url) { 'https://sample.atlassian.net/rest/devinfo/0.10/bulk' }
+ let(:request_body) do
+ {
+ repositories: [
+ Atlassian::JiraConnect::Serializers::RepositoryEntity.represent(
+ project,
+ commits: project.commits_by(oids: commit_shas),
+ branches: [project.repository.find_branch(branch_name)],
+ update_sequence_id: update_sequence_id
+ )
+ ]
+ }.to_json
+ end
+
+ subject { described_class.new.perform(project_id, branch_name, commit_shas, update_sequence_id) }
+
+ it 'sends the request with custom update_sequence_id' do
+ expect(Atlassian::JiraConnect::Client).to receive(:post)
+ .with(URI(request_url), headers: anything, body: request_body)
+
+ subject
+ end
+ end
end
end
diff --git a/spec/workers/jira_connect/sync_merge_request_worker_spec.rb b/spec/workers/jira_connect/sync_merge_request_worker_spec.rb
index 764201e750a..b3c0db4f260 100644
--- a/spec/workers/jira_connect/sync_merge_request_worker_spec.rb
+++ b/spec/workers/jira_connect/sync_merge_request_worker_spec.rb
@@ -4,14 +4,18 @@ require 'spec_helper'
RSpec.describe JiraConnect::SyncMergeRequestWorker do
describe '#perform' do
- let(:merge_request) { create(:merge_request) }
+ let_it_be(:group) { create(:group) }
+ let_it_be(:project) { create(:project, :repository, group: group) }
+ let_it_be(:subscription) { create(:jira_connect_subscription, installation: create(:jira_connect_installation), namespace: group) }
+ let_it_be(:merge_request) { create(:merge_request, source_project: project) }
+
let(:merge_request_id) { merge_request.id }
subject { described_class.new.perform(merge_request_id) }
it 'calls JiraConnect::SyncService#execute' do
expect_next_instance_of(JiraConnect::SyncService) do |service|
- expect(service).to receive(:execute).with(merge_requests: [merge_request])
+ expect(service).to receive(:execute).with(merge_requests: [merge_request], update_sequence_id: nil)
end
subject
@@ -26,5 +30,30 @@ RSpec.describe JiraConnect::SyncMergeRequestWorker do
subject
end
end
+
+ context 'with update_sequence_id' do
+ let(:update_sequence_id) { 1 }
+ let(:request_url) { 'https://sample.atlassian.net/rest/devinfo/0.10/bulk' }
+ let(:request_body) do
+ {
+ repositories: [
+ Atlassian::JiraConnect::Serializers::RepositoryEntity.represent(
+ project,
+ merge_requests: [merge_request],
+ update_sequence_id: update_sequence_id
+ )
+ ]
+ }.to_json
+ end
+
+ subject { described_class.new.perform(merge_request_id, update_sequence_id) }
+
+ it 'sends the request with custom update_sequence_id' do
+ expect(Atlassian::JiraConnect::Client).to receive(:post)
+ .with(URI(request_url), headers: anything, body: request_body)
+
+ subject
+ end
+ end
end
end
diff --git a/spec/workers/jira_connect/sync_project_worker_spec.rb b/spec/workers/jira_connect/sync_project_worker_spec.rb
new file mode 100644
index 00000000000..25210de828c
--- /dev/null
+++ b/spec/workers/jira_connect/sync_project_worker_spec.rb
@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe JiraConnect::SyncProjectWorker, factory_default: :keep do
+ describe '#perform' do
+ let_it_be(:project) { create_default(:project) }
+ let!(:mr_with_jira_title) { create(:merge_request, :unique_branches, title: 'TEST-123') }
+ let!(:mr_with_jira_description) { create(:merge_request, :unique_branches, description: 'TEST-323') }
+ let!(:mr_with_other_title) { create(:merge_request, :unique_branches) }
+ let!(:jira_subscription) { create(:jira_connect_subscription, namespace: project.namespace) }
+
+ let(:jira_connect_sync_service) { JiraConnect::SyncService.new(project) }
+ let(:job_args) { [project.id, update_sequence_id] }
+ let(:update_sequence_id) { 1 }
+
+ before do
+ stub_request(:post, 'https://sample.atlassian.net/rest/devinfo/0.10/bulk').to_return(status: 200, body: '', headers: {})
+
+ jira_connect_sync_service
+ allow(JiraConnect::SyncService).to receive(:new) { jira_connect_sync_service }
+ end
+
+ context 'when the project is not found' do
+ it 'does not raise an error' do
+ expect { described_class.new.perform('non_existing_record_id', update_sequence_id) }.not_to raise_error
+ end
+ end
+
+ it 'avoids N+1 database queries' do
+ control_count = ActiveRecord::QueryRecorder.new { described_class.new.perform(project.id, update_sequence_id) }.count
+
+ create(:merge_request, :unique_branches, title: 'TEST-123')
+
+ expect { described_class.new.perform(project.id, update_sequence_id) }.not_to exceed_query_limit(control_count)
+ end
+
+ it_behaves_like 'an idempotent worker' do
+ let(:request_url) { 'https://sample.atlassian.net/rest/devinfo/0.10/bulk' }
+ let(:request_body) do
+ {
+ repositories: [
+ Atlassian::JiraConnect::Serializers::RepositoryEntity.represent(
+ project,
+ merge_requests: [mr_with_jira_description, mr_with_jira_title],
+ update_sequence_id: update_sequence_id
+ )
+ ]
+ }.to_json
+ end
+
+ it 'sends the request with custom update_sequence_id' do
+ expect(Atlassian::JiraConnect::Client).to receive(:post)
+ .exactly(IdempotentWorkerHelper::WORKER_EXEC_TIMES).times
+ .with(URI(request_url), headers: anything, body: request_body)
+
+ subject
+ end
+
+ context 'when the number of merge requests to sync is higher than the limit' do
+ let!(:most_recent_merge_request) { create(:merge_request, :unique_branches, description: 'TEST-323', title: 'TEST-123') }
+
+ before do
+ stub_const("#{described_class}::MERGE_REQUEST_LIMIT", 1)
+ end
+
+ it 'syncs only the most recent merge requests within the limit' do
+ expect(jira_connect_sync_service).to receive(:execute)
+ .exactly(IdempotentWorkerHelper::WORKER_EXEC_TIMES).times
+ .with(merge_requests: [most_recent_merge_request], update_sequence_id: update_sequence_id)
+
+ subject
+ end
+ end
+ end
+ end
+end
diff --git a/spec/workers/propagate_integration_inherit_descendant_worker_spec.rb b/spec/workers/propagate_integration_inherit_descendant_worker_spec.rb
new file mode 100644
index 00000000000..b5eb0f69017
--- /dev/null
+++ b/spec/workers/propagate_integration_inherit_descendant_worker_spec.rb
@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe PropagateIntegrationInheritDescendantWorker do
+ let_it_be(:group) { create(:group) }
+ let_it_be(:subgroup) { create(:group, parent: group) }
+ let_it_be(:group_integration) { create(:redmine_service, group: group, project: nil) }
+ let_it_be(:subgroup_integration) { create(:redmine_service, group: subgroup, project: nil, inherit_from_id: group_integration.id) }
+
+ it_behaves_like 'an idempotent worker' do
+ let(:job_args) { [group_integration.id, subgroup_integration.id, subgroup_integration.id] }
+
+ it 'calls to BulkUpdateIntegrationService' do
+ expect(BulkUpdateIntegrationService).to receive(:new)
+ .with(group_integration, match_array(subgroup_integration)).twice
+ .and_return(double(execute: nil))
+
+ subject
+ end
+ end
+
+ context 'with an invalid integration id' do
+ it 'returns without failure' do
+ expect(BulkUpdateIntegrationService).not_to receive(:new)
+
+ subject.perform(0, subgroup_integration.id, subgroup_integration.id)
+ end
+ end
+end
diff --git a/spec/workers/propagate_integration_inherit_worker_spec.rb b/spec/workers/propagate_integration_inherit_worker_spec.rb
index cbfee29a6a0..39219eaa3b5 100644
--- a/spec/workers/propagate_integration_inherit_worker_spec.rb
+++ b/spec/workers/propagate_integration_inherit_worker_spec.rb
@@ -12,7 +12,7 @@ RSpec.describe PropagateIntegrationInheritWorker do
it_behaves_like 'an idempotent worker' do
let(:job_args) { [integration.id, integration1.id, integration3.id] }
- it 'calls to BulkCreateIntegrationService' do
+ it 'calls to BulkUpdateIntegrationService' do
expect(BulkUpdateIntegrationService).to receive(:new)
.with(integration, match_array(integration1)).twice
.and_return(double(execute: nil))
diff --git a/spec/workers/purge_dependency_proxy_cache_worker_spec.rb b/spec/workers/purge_dependency_proxy_cache_worker_spec.rb
new file mode 100644
index 00000000000..9cd3b6636f5
--- /dev/null
+++ b/spec/workers/purge_dependency_proxy_cache_worker_spec.rb
@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe PurgeDependencyProxyCacheWorker do
+ let_it_be(:user) { create(:admin) }
+ let_it_be(:blob) { create(:dependency_proxy_blob) }
+ let_it_be(:group, reload: true) { blob.group }
+ let_it_be(:group_id) { group.id }
+
+ subject { described_class.new.perform(user.id, group_id) }
+
+ before do
+ stub_config(dependency_proxy: { enabled: true })
+ group.create_dependency_proxy_setting!(enabled: true)
+ end
+
+ describe '#perform' do
+ shared_examples 'returns nil' do
+ it 'returns nil' do
+ expect { subject }.not_to change { group.dependency_proxy_blobs.size }
+ expect(subject).to be_nil
+ end
+ end
+
+ context 'an admin user' do
+ include_examples 'an idempotent worker' do
+ let(:job_args) { [user.id, group_id] }
+
+ it 'deletes the blobs and returns ok' do
+ expect(group.dependency_proxy_blobs.size).to eq(1)
+
+ subject
+
+ expect(group.dependency_proxy_blobs.size).to eq(0)
+ end
+ end
+ end
+
+ context 'a non-admin user' do
+ let(:user) { create(:user) }
+
+ it_behaves_like 'returns nil'
+ end
+
+ context 'an invalid user id' do
+ let(:user) { double('User', id: 99999) }
+
+ it_behaves_like 'returns nil'
+ end
+
+ context 'an invalid group' do
+ let(:group_id) { 99999 }
+
+ it_behaves_like 'returns nil'
+ end
+ end
+end
diff --git a/spec/workers/remove_expired_members_worker_spec.rb b/spec/workers/remove_expired_members_worker_spec.rb
index 8a34b41834b..5642de05731 100644
--- a/spec/workers/remove_expired_members_worker_spec.rb
+++ b/spec/workers/remove_expired_members_worker_spec.rb
@@ -31,6 +31,50 @@ RSpec.describe RemoveExpiredMembersWorker do
end
end
+ context 'project bots' do
+ let(:project) { create(:project) }
+
+ context 'expired project bot', :sidekiq_inline do
+ let_it_be(:expired_project_bot) { create(:user, :project_bot) }
+
+ before do
+ project.add_user(expired_project_bot, :maintainer, expires_at: 1.day.from_now)
+ travel_to(3.days.from_now)
+ end
+
+ it 'removes expired project bot membership' do
+ expect { worker.perform }.to change { Member.count }.by(-1)
+ expect(Member.find_by(user_id: expired_project_bot.id)).to be_nil
+ end
+
+ it 'deletes expired project bot' do
+ worker.perform
+
+ expect(User.exists?(expired_project_bot.id)).to be(false)
+ end
+ end
+
+ context 'non-expired project bot' do
+ let_it_be(:other_project_bot) { create(:user, :project_bot) }
+
+ before do
+ project.add_user(other_project_bot, :maintainer, expires_at: 10.days.from_now)
+ travel_to(3.days.from_now)
+ end
+
+ it 'does not remove expired project bot that expires in the future' do
+ expect { worker.perform }.to change { Member.count }.by(0)
+ expect(other_project_bot.reload).to be_present
+ end
+
+ it 'does not delete project bot expiring in the future' do
+ worker.perform
+
+ expect(User.exists?(other_project_bot.id)).to be(true)
+ end
+ end
+ end
+
context 'group members' do
let_it_be(:expired_group_member) { create(:group_member, expires_at: 1.day.from_now, access_level: GroupMember::DEVELOPER) }
let_it_be(:group_member_expiring_in_future) { create(:group_member, expires_at: 10.days.from_now, access_level: GroupMember::DEVELOPER) }
diff --git a/spec/workers/repository_cleanup_worker_spec.rb b/spec/workers/repository_cleanup_worker_spec.rb
index f5887d08bd2..2b700b944d2 100644
--- a/spec/workers/repository_cleanup_worker_spec.rb
+++ b/spec/workers/repository_cleanup_worker_spec.rb
@@ -40,6 +40,8 @@ RSpec.describe RepositoryCleanupWorker do
describe '#sidekiq_retries_exhausted' do
let(:job) { { 'args' => [project.id, user.id], 'error_message' => 'Error' } }
+ subject(:sidekiq_retries_exhausted) { described_class.sidekiq_retries_exhausted_block.call(job, StandardError.new) }
+
it 'does not send a failure notification for a RecordNotFound error' do
expect(NotificationService).not_to receive(:new)
@@ -51,7 +53,13 @@ RSpec.describe RepositoryCleanupWorker do
expect(service).to receive(:repository_cleanup_failure).with(project, user, 'Error')
end
- described_class.sidekiq_retries_exhausted_block.call(job, StandardError.new)
+ sidekiq_retries_exhausted
+ end
+
+ it 'cleans up the attempt' do
+ expect(Projects::CleanupService).to receive(:cleanup_after).with(project)
+
+ sidekiq_retries_exhausted
end
end
end
diff --git a/spec/workers/schedule_merge_request_cleanup_refs_worker_spec.rb b/spec/workers/schedule_merge_request_cleanup_refs_worker_spec.rb
new file mode 100644
index 00000000000..0dd50efba1c
--- /dev/null
+++ b/spec/workers/schedule_merge_request_cleanup_refs_worker_spec.rb
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe ScheduleMergeRequestCleanupRefsWorker do
+ subject(:worker) { described_class.new }
+
+ describe '#perform' do
+ before do
+ allow(MergeRequest::CleanupSchedule)
+ .to receive(:scheduled_merge_request_ids)
+ .with(described_class::LIMIT)
+ .and_return([1, 2, 3, 4])
+ end
+
+ it 'does nothing if the database is read-only' do
+ allow(Gitlab::Database).to receive(:read_only?).and_return(true)
+ expect(MergeRequestCleanupRefsWorker).not_to receive(:bulk_perform_in)
+
+ worker.perform
+ end
+
+ include_examples 'an idempotent worker' do
+ it 'schedules MergeRequestCleanupRefsWorker to be performed by batch' do
+ expect(MergeRequestCleanupRefsWorker)
+ .to receive(:bulk_perform_in)
+ .with(
+ described_class::DELAY,
+ [[1], [2], [3], [4]],
+ batch_size: described_class::BATCH_SIZE
+ )
+
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:merge_requests_count, 4)
+
+ worker.perform
+ end
+ end
+ end
+end