author     GitLab Bot <gitlab-bot@gitlab.com>  2020-10-21 07:08:36 +0000
committer  GitLab Bot <gitlab-bot@gitlab.com>  2020-10-21 07:08:36 +0000
commit     48aff82709769b098321c738f3444b9bdaa694c6 (patch) (tag: v13.5.0-rc42)
tree       e00c7c43e2d9b603a5a6af576b1685e400410dee /spec/workers
parent     879f5329ee916a948223f8f43d77fba4da6cd028 (diff)

Add latest changes from gitlab-org/gitlab@13-5-stable-ee
Diffstat (limited to 'spec/workers')
-rw-r--r--  spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb | 12
-rw-r--r--  spec/workers/analytics/instance_statistics/counter_job_worker_spec.rb | 20
-rw-r--r--  spec/workers/authorized_project_update/periodic_recalculate_worker_spec.rb | 12
-rw-r--r--  spec/workers/authorized_project_update/user_refresh_over_user_range_worker_spec.rb | 12
-rw-r--r--  spec/workers/build_finished_worker_spec.rb | 2
-rw-r--r--  spec/workers/ci/build_trace_chunk_flush_worker_spec.rb | 4
-rw-r--r--  spec/workers/ci/delete_objects_worker_spec.rb | 49
-rw-r--r--  spec/workers/ci/schedule_delete_objects_cron_worker_spec.rb | 15
-rw-r--r--  spec/workers/cleanup_container_repository_worker_spec.rb | 23
-rw-r--r--  spec/workers/concerns/limited_capacity/job_tracker_spec.rb | 100
-rw-r--r--  spec/workers/concerns/limited_capacity/worker_spec.rb | 285
-rw-r--r--  spec/workers/container_expiration_policy_worker_spec.rb | 46
-rw-r--r--  spec/workers/deployments/drop_older_deployments_worker_spec.rb | 18
-rw-r--r--  spec/workers/deployments/execute_hooks_worker_spec.rb | 51
-rw-r--r--  spec/workers/deployments/link_merge_request_worker_spec.rb | 71
-rw-r--r--  spec/workers/deployments/success_worker_spec.rb | 12
-rw-r--r--  spec/workers/deployments/update_environment_worker_spec.rb | 63
-rw-r--r--  spec/workers/design_management/copy_design_collection_worker_spec.rb | 39
-rw-r--r--  spec/workers/design_management/new_version_worker_spec.rb | 4
-rw-r--r--  spec/workers/disallow_two_factor_for_group_worker_spec.rb | 22
-rw-r--r--  spec/workers/disallow_two_factor_for_subgroups_worker_spec.rb | 17
-rw-r--r--  spec/workers/export_csv_worker_spec.rb | 20
-rw-r--r--  spec/workers/git_garbage_collect_worker_spec.rb | 58
-rw-r--r--  spec/workers/group_export_worker_spec.rb | 10
-rw-r--r--  spec/workers/group_import_worker_spec.rb | 63
-rw-r--r--  spec/workers/incident_management/add_severity_system_note_worker_spec.rb | 60
-rw-r--r--  spec/workers/incident_management/process_alert_worker_spec.rb | 2
-rw-r--r--  spec/workers/incident_management/process_prometheus_alert_worker_spec.rb | 2
-rw-r--r--  spec/workers/issuable_export_csv_worker_spec.rb | 73
-rw-r--r--  spec/workers/member_invitation_reminder_emails_worker_spec.rb | 39
-rw-r--r--  spec/workers/metrics/dashboard/prune_old_annotations_worker_spec.rb | 2
-rw-r--r--  spec/workers/metrics/dashboard/sync_dashboards_worker_spec.rb | 25
-rw-r--r--  spec/workers/post_receive_spec.rb | 4
-rw-r--r--  spec/workers/project_export_worker_spec.rb | 4
-rw-r--r--  spec/workers/propagate_integration_group_worker_spec.rb | 44
-rw-r--r--  spec/workers/propagate_integration_inherit_worker_spec.rb | 32
-rw-r--r--  spec/workers/propagate_integration_project_worker_spec.rb | 44
-rw-r--r--  spec/workers/web_hooks/destroy_worker_spec.rb | 59
38 files changed, 1247 insertions, 171 deletions
diff --git a/spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb b/spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb
index 620900b3402..ff692d0eda6 100644
--- a/spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb
+++ b/spec/workers/analytics/instance_statistics/count_job_trigger_worker_spec.rb
@@ -14,16 +14,4 @@ RSpec.describe Analytics::InstanceStatistics::CountJobTriggerWorker do
expect(Analytics::InstanceStatistics::CounterJobWorker.jobs.count).to eq(expected_count)
end
end
-
- context 'when the `store_instance_statistics_measurements` feature flag is off' do
- before do
- stub_feature_flags(store_instance_statistics_measurements: false)
- end
-
- it 'does not trigger any CounterJobWorker job' do
- subject.perform
-
- expect(Analytics::InstanceStatistics::CounterJobWorker.jobs.count).to eq(0)
- end
- end
end
diff --git a/spec/workers/analytics/instance_statistics/counter_job_worker_spec.rb b/spec/workers/analytics/instance_statistics/counter_job_worker_spec.rb
index 8db86071dc4..667ec0bcb75 100644
--- a/spec/workers/analytics/instance_statistics/counter_job_worker_spec.rb
+++ b/spec/workers/analytics/instance_statistics/counter_job_worker_spec.rb
@@ -18,7 +18,7 @@ RSpec.describe Analytics::InstanceStatistics::CounterJobWorker do
it 'counts a scope and stores the result' do
subject
- measurement = Analytics::InstanceStatistics::Measurement.first
+ measurement = Analytics::InstanceStatistics::Measurement.users.first
expect(measurement.recorded_at).to be_like_time(recorded_at)
expect(measurement.identifier).to eq('users')
expect(measurement.count).to eq(2)
@@ -33,7 +33,7 @@ RSpec.describe Analytics::InstanceStatistics::CounterJobWorker do
it 'sets 0 as the count' do
subject
- measurement = Analytics::InstanceStatistics::Measurement.first
+ measurement = Analytics::InstanceStatistics::Measurement.groups.first
expect(measurement.recorded_at).to be_like_time(recorded_at)
expect(measurement.identifier).to eq('groups')
expect(measurement.count).to eq(0)
@@ -51,4 +51,20 @@ RSpec.describe Analytics::InstanceStatistics::CounterJobWorker do
expect { subject }.not_to change { Analytics::InstanceStatistics::Measurement.count }
end
+
+ context 'when pipelines_succeeded identifier is passed' do
+ let_it_be(:pipeline) { create(:ci_pipeline, :success) }
+
+ let(:successful_pipelines_measurement_identifier) { ::Analytics::InstanceStatistics::Measurement.identifiers.fetch(:pipelines_succeeded) }
+ let(:job_args) { [successful_pipelines_measurement_identifier, pipeline.id, pipeline.id, recorded_at] }
+
+ it 'counts successful pipelines' do
+ subject
+
+ measurement = Analytics::InstanceStatistics::Measurement.pipelines_succeeded.first
+ expect(measurement.recorded_at).to be_like_time(recorded_at)
+ expect(measurement.identifier).to eq('pipelines_succeeded')
+ expect(measurement.count).to eq(1)
+ end
+ end
end
diff --git a/spec/workers/authorized_project_update/periodic_recalculate_worker_spec.rb b/spec/workers/authorized_project_update/periodic_recalculate_worker_spec.rb
index 2d633828ae3..9d4d48d0568 100644
--- a/spec/workers/authorized_project_update/periodic_recalculate_worker_spec.rb
+++ b/spec/workers/authorized_project_update/periodic_recalculate_worker_spec.rb
@@ -11,17 +11,5 @@ RSpec.describe AuthorizedProjectUpdate::PeriodicRecalculateWorker do
subject.perform
end
-
- context 'feature flag :periodic_project_authorization_recalculation is disabled' do
- before do
- stub_feature_flags(periodic_project_authorization_recalculation: false)
- end
-
- it 'does not call AuthorizedProjectUpdate::PeriodicRecalculateService' do
- expect(AuthorizedProjectUpdate::PeriodicRecalculateService).not_to receive(:new)
-
- subject.perform
- end
- end
end
end
diff --git a/spec/workers/authorized_project_update/user_refresh_over_user_range_worker_spec.rb b/spec/workers/authorized_project_update/user_refresh_over_user_range_worker_spec.rb
index c49e4c453bf..a27c431523e 100644
--- a/spec/workers/authorized_project_update/user_refresh_over_user_range_worker_spec.rb
+++ b/spec/workers/authorized_project_update/user_refresh_over_user_range_worker_spec.rb
@@ -14,17 +14,5 @@ RSpec.describe AuthorizedProjectUpdate::UserRefreshOverUserRangeWorker do
subject.perform(start_user_id, end_user_id)
end
-
- context 'feature flag :periodic_project_authorization_recalculation is disabled' do
- before do
- stub_feature_flags(periodic_project_authorization_recalculation: false)
- end
-
- it 'does not call AuthorizedProjectUpdate::RecalculateForUserRangeService' do
- expect(AuthorizedProjectUpdate::RecalculateForUserRangeService).not_to receive(:new)
-
- subject.perform(start_user_id, end_user_id)
- end
- end
end
end
diff --git a/spec/workers/build_finished_worker_spec.rb b/spec/workers/build_finished_worker_spec.rb
index e7f7ae84621..11b50961e9e 100644
--- a/spec/workers/build_finished_worker_spec.rb
+++ b/spec/workers/build_finished_worker_spec.rb
@@ -20,10 +20,10 @@ RSpec.describe BuildFinishedWorker do
expect_any_instance_of(BuildTraceSectionsWorker).to receive(:perform)
expect_any_instance_of(BuildCoverageWorker).to receive(:perform)
expect(BuildHooksWorker).to receive(:perform_async)
- expect(ArchiveTraceWorker).to receive(:perform_async)
expect(ExpirePipelineCacheWorker).to receive(:perform_async)
expect(ChatNotificationWorker).not_to receive(:perform_async)
expect(Ci::BuildReportResultWorker).not_to receive(:perform)
+ expect(ArchiveTraceWorker).to receive(:perform_in)
subject
end
diff --git a/spec/workers/ci/build_trace_chunk_flush_worker_spec.rb b/spec/workers/ci/build_trace_chunk_flush_worker_spec.rb
index 352ad6d4cf6..8aac80a02be 100644
--- a/spec/workers/ci/build_trace_chunk_flush_worker_spec.rb
+++ b/spec/workers/ci/build_trace_chunk_flush_worker_spec.rb
@@ -14,7 +14,7 @@ RSpec.describe Ci::BuildTraceChunkFlushWorker do
described_class.new.perform(chunk.id)
- expect(chunk.reload).to be_persisted
+ expect(chunk.reload).to be_migrated
end
describe '#perform' do
@@ -24,7 +24,7 @@ RSpec.describe Ci::BuildTraceChunkFlushWorker do
it 'migrates build trace chunk to a safe store' do
subject
- expect(chunk.reload).to be_persisted
+ expect(chunk.reload).to be_migrated
end
end
end
diff --git a/spec/workers/ci/delete_objects_worker_spec.rb b/spec/workers/ci/delete_objects_worker_spec.rb
new file mode 100644
index 00000000000..6cb8e0cba37
--- /dev/null
+++ b/spec/workers/ci/delete_objects_worker_spec.rb
@@ -0,0 +1,49 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Ci::DeleteObjectsWorker do
+ let(:worker) { described_class.new }
+
+ it { expect(described_class.idempotent?).to be_truthy }
+
+ describe '#perform' do
+ it 'executes a service' do
+ expect_next_instance_of(Ci::DeleteObjectsService) do |instance|
+ expect(instance).to receive(:execute)
+ expect(instance).to receive(:remaining_batches_count).once.and_call_original
+ end
+
+ worker.perform
+ end
+ end
+
+ describe '#max_running_jobs' do
+ using RSpec::Parameterized::TableSyntax
+
+ before do
+ stub_feature_flags(
+ ci_delete_objects_low_concurrency: low,
+ ci_delete_objects_medium_concurrency: medium,
+ ci_delete_objects_high_concurrency: high
+ )
+ end
+
+ subject(:max_running_jobs) { worker.max_running_jobs }
+
+ where(:low, :medium, :high, :expected) do
+ false | false | false | 0
+ true | true | true | 2
+ true | false | false | 2
+ false | true | false | 20
+ false | true | true | 20
+ false | false | true | 50
+ end
+
+ with_them do
+ it 'sets up concurrency depending on the feature flag' do
+ expect(max_running_jobs).to eq(expected)
+ end
+ end
+ end
+end
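
The truth table above fixes the concurrency tiers at 0, 2, 20 and 50 running jobs, with the low flag taking precedence over medium and medium over high. For orientation only, a minimal sketch of a #max_running_jobs method that would satisfy that table, assuming the usual ::Feature.enabled? helper (the worker implementation itself is not part of this diff):

    # Illustrative sketch only, not code from this commit.
    def max_running_jobs
      if ::Feature.enabled?(:ci_delete_objects_low_concurrency)
        2
      elsif ::Feature.enabled?(:ci_delete_objects_medium_concurrency)
        20
      elsif ::Feature.enabled?(:ci_delete_objects_high_concurrency)
        50
      else
        0
      end
    end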
diff --git a/spec/workers/ci/schedule_delete_objects_cron_worker_spec.rb b/spec/workers/ci/schedule_delete_objects_cron_worker_spec.rb
new file mode 100644
index 00000000000..142df271f90
--- /dev/null
+++ b/spec/workers/ci/schedule_delete_objects_cron_worker_spec.rb
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Ci::ScheduleDeleteObjectsCronWorker do
+ let(:worker) { described_class.new }
+
+ describe '#perform' do
+ it 'enqueues DeleteObjectsWorker jobs' do
+ expect(Ci::DeleteObjectsWorker).to receive(:perform_with_capacity)
+
+ worker.perform
+ end
+ end
+end
diff --git a/spec/workers/cleanup_container_repository_worker_spec.rb b/spec/workers/cleanup_container_repository_worker_spec.rb
index 0545f7a35e4..9cf8974a2a1 100644
--- a/spec/workers/cleanup_container_repository_worker_spec.rb
+++ b/spec/workers/cleanup_container_repository_worker_spec.rb
@@ -40,14 +40,35 @@ RSpec.describe CleanupContainerRepositoryWorker, :clean_gitlab_redis_shared_stat
context 'container expiration policy' do
let(:params) { { key: 'value', 'container_expiration_policy' => true } }
+ before do
+ allow(ContainerRepository)
+ .to receive(:find_by_id).with(repository.id).and_return(repository)
+ end
+
it 'executes the destroy service' do
+ expect(repository).to receive(:start_expiration_policy!).and_call_original
+ expect(repository).to receive(:reset_expiration_policy_started_at!).and_call_original
expect(Projects::ContainerRepository::CleanupTagsService).to receive(:new)
.with(project, nil, params.merge('container_expiration_policy' => true))
.and_return(service)
- expect(service).to receive(:execute)
+ expect(service).to receive(:execute).and_return(status: :success)
+
+ subject.perform(nil, repository.id, params)
+ expect(repository.reload.expiration_policy_started_at).to be_nil
+ end
+
+ it "doesn't reset the expiration policy started at if the destroy service returns an error" do
+ expect(repository).to receive(:start_expiration_policy!).and_call_original
+ expect(repository).not_to receive(:reset_expiration_policy_started_at!)
+ expect(Projects::ContainerRepository::CleanupTagsService).to receive(:new)
+ .with(project, nil, params.merge('container_expiration_policy' => true))
+ .and_return(service)
+
+ expect(service).to receive(:execute).and_return(status: :error, message: 'timeout while deleting tags')
subject.perform(nil, repository.id, params)
+ expect(repository.reload.expiration_policy_started_at).not_to be_nil
end
end
end
diff --git a/spec/workers/concerns/limited_capacity/job_tracker_spec.rb b/spec/workers/concerns/limited_capacity/job_tracker_spec.rb
new file mode 100644
index 00000000000..2c79f347903
--- /dev/null
+++ b/spec/workers/concerns/limited_capacity/job_tracker_spec.rb
@@ -0,0 +1,100 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe LimitedCapacity::JobTracker, :clean_gitlab_redis_queues do
+ let(:job_tracker) do
+ described_class.new('namespace')
+ end
+
+ describe '#register' do
+ it 'adds jid to the set' do
+ job_tracker.register('a-job-id')
+
+ expect(job_tracker.running_jids).to contain_exactly('a-job-id')
+ end
+
+ it 'updates the counter' do
+ expect { job_tracker.register('a-job-id') }
+ .to change { job_tracker.count }
+ .from(0)
+ .to(1)
+ end
+
+ it 'does it in only one Redis call' do
+ expect(job_tracker).to receive(:with_redis).once.and_call_original
+
+ job_tracker.register('a-job-id')
+ end
+ end
+
+ describe '#remove' do
+ before do
+ job_tracker.register(%w[a-job-id other-job-id])
+ end
+
+ it 'removes jid from the set' do
+ job_tracker.remove('other-job-id')
+
+ expect(job_tracker.running_jids).to contain_exactly('a-job-id')
+ end
+
+ it 'updates the counter' do
+ expect { job_tracker.remove('other-job-id') }
+ .to change { job_tracker.count }
+ .from(2)
+ .to(1)
+ end
+
+ it 'does it in only one Redis call' do
+ expect(job_tracker).to receive(:with_redis).once.and_call_original
+
+ job_tracker.remove('other-job-id')
+ end
+ end
+
+ describe '#clean_up' do
+ before do
+ job_tracker.register('a-job-id')
+ end
+
+ context 'with running jobs' do
+ before do
+ expect(Gitlab::SidekiqStatus).to receive(:completed_jids)
+ .with(%w[a-job-id])
+ .and_return([])
+ end
+
+ it 'does not remove the jid from the set' do
+ expect { job_tracker.clean_up }
+ .not_to change { job_tracker.running_jids.include?('a-job-id') }
+ end
+
+ it 'does only one Redis call to get the job ids' do
+ expect(job_tracker).to receive(:with_redis).once.and_call_original
+
+ job_tracker.clean_up
+ end
+ end
+
+ context 'with completed jobs' do
+ it 'removes the jid from the set' do
+ expect { job_tracker.clean_up }
+ .to change { job_tracker.running_jids.include?('a-job-id') }
+ end
+
+ it 'updates the counter' do
+ expect { job_tracker.clean_up }
+ .to change { job_tracker.count }
+ .from(1)
+ .to(0)
+ end
+
+ it 'gets the job ids, removes them, and updates the counter with only two Redis calls' do
+ expect(job_tracker).to receive(:with_redis).twice.and_call_original
+
+ job_tracker.clean_up
+ end
+ end
+ end
+end
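
Read together, the examples above describe the JobTracker surface: a Redis-backed set of running jids plus a counter, with clean_up consulting Gitlab::SidekiqStatus to evict completed jobs. A usage sketch mirroring the expectations in this spec (illustrative only):

    tracker = LimitedCapacity::JobTracker.new('namespace')

    tracker.register('a-job-id')  # adds the jid to the running set, counter goes 0 -> 1
    tracker.running_jids          # => ["a-job-id"]
    tracker.count                 # => 1

    tracker.clean_up              # drops jids that Gitlab::SidekiqStatus reports as completed
    tracker.remove('a-job-id')    # explicit removal once a job finishes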
diff --git a/spec/workers/concerns/limited_capacity/worker_spec.rb b/spec/workers/concerns/limited_capacity/worker_spec.rb
new file mode 100644
index 00000000000..8a15675c04d
--- /dev/null
+++ b/spec/workers/concerns/limited_capacity/worker_spec.rb
@@ -0,0 +1,285 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe LimitedCapacity::Worker, :clean_gitlab_redis_queues, :aggregate_failures do
+ let(:worker_class) do
+ Class.new do
+ def self.name
+ 'DummyWorker'
+ end
+
+ include ApplicationWorker
+ include LimitedCapacity::Worker
+ end
+ end
+
+ let(:worker) { worker_class.new }
+
+ let(:job_tracker) do
+ LimitedCapacity::JobTracker.new(worker_class.name)
+ end
+
+ before do
+ worker.jid = 'my-jid'
+ allow(worker).to receive(:job_tracker).and_return(job_tracker)
+ end
+
+ describe 'required methods' do
+ it { expect { worker.perform_work }.to raise_error(NotImplementedError) }
+ it { expect { worker.remaining_work_count }.to raise_error(NotImplementedError) }
+ it { expect { worker.max_running_jobs }.to raise_error(NotImplementedError) }
+ end
+
+ describe 'Sidekiq options' do
+ it 'does not retry failed jobs' do
+ expect(worker_class.sidekiq_options['retry']).to eq(0)
+ end
+
+ it 'does not deduplicate jobs' do
+ expect(worker_class.get_deduplicate_strategy).to eq(:none)
+ end
+ end
+
+ describe '.perform_with_capacity' do
+ subject(:perform_with_capacity) { worker_class.perform_with_capacity(:arg) }
+
+ before do
+ expect_next_instance_of(worker_class) do |instance|
+ expect(instance).to receive(:remove_failed_jobs)
+ expect(instance).to receive(:report_prometheus_metrics)
+
+ allow(instance).to receive(:remaining_work_count).and_return(remaining_work_count)
+ allow(instance).to receive(:remaining_capacity).and_return(remaining_capacity)
+ end
+ end
+
+ context 'when capacity is larger than work' do
+ let(:remaining_work_count) { 2 }
+ let(:remaining_capacity) { 3 }
+
+ it 'enqueues jobs for remaining work' do
+ expect(worker_class)
+ .to receive(:bulk_perform_async)
+ .with([[:arg], [:arg]])
+
+ perform_with_capacity
+ end
+ end
+
+ context 'when capacity is lower than work' do
+ let(:remaining_work_count) { 5 }
+ let(:remaining_capacity) { 3 }
+
+ it 'enqueues jobs for remaining work' do
+ expect(worker_class)
+ .to receive(:bulk_perform_async)
+ .with([[:arg], [:arg], [:arg]])
+
+ perform_with_capacity
+ end
+ end
+ end
+
+ describe '#perform' do
+ subject(:perform) { worker.perform(:arg) }
+
+ context 'with capacity' do
+ before do
+ allow(worker).to receive(:max_running_jobs).and_return(10)
+ allow(worker).to receive(:running_jobs_count).and_return(0)
+ allow(worker).to receive(:remaining_work_count).and_return(0)
+ end
+
+ it 'calls perform_work' do
+ expect(worker).to receive(:perform_work).with(:arg)
+
+ perform
+ end
+
+ it 're-enqueues itself' do
+ allow(worker).to receive(:perform_work)
+ expect(worker).to receive(:re_enqueue).with(:arg)
+
+ perform
+ end
+
+ it 'registers itself in the running set' do
+ allow(worker).to receive(:perform_work)
+ expect(job_tracker).to receive(:register).with('my-jid')
+
+ perform
+ end
+
+ it 'removes itself from the running set' do
+ expect(job_tracker).to receive(:remove).with('my-jid')
+
+ allow(worker).to receive(:perform_work)
+
+ perform
+ end
+
+ it 'reports prometheus metrics' do
+ allow(worker).to receive(:perform_work)
+ expect(worker).to receive(:report_prometheus_metrics)
+
+ perform
+ end
+ end
+
+ context 'with capacity and without work' do
+ before do
+ allow(worker).to receive(:max_running_jobs).and_return(10)
+ allow(worker).to receive(:running_jobs_count).and_return(0)
+ allow(worker).to receive(:remaining_work_count).and_return(0)
+ allow(worker).to receive(:perform_work)
+ end
+
+ it 'does not re-enqueue itself' do
+ expect(worker_class).not_to receive(:perform_async)
+
+ perform
+ end
+ end
+
+ context 'without capacity' do
+ before do
+ allow(worker).to receive(:max_running_jobs).and_return(10)
+ allow(worker).to receive(:running_jobs_count).and_return(15)
+ allow(worker).to receive(:remaining_work_count).and_return(10)
+ end
+
+ it 'does not call perform_work' do
+ expect(worker).not_to receive(:perform_work)
+
+ perform
+ end
+
+ it 'does not re-enqueue itself' do
+ expect(worker_class).not_to receive(:perform_async)
+
+ perform
+ end
+
+ it 'does not register in the running set' do
+ expect(job_tracker).not_to receive(:register)
+
+ perform
+ end
+
+ it 'removes itself from the running set' do
+ expect(job_tracker).to receive(:remove).with('my-jid')
+
+ perform
+ end
+
+ it 'reports prometheus metrics' do
+ expect(worker).to receive(:report_prometheus_metrics)
+
+ perform
+ end
+ end
+
+ context 'when perform_work fails' do
+ it 'does not re-enqueue itself' do
+ expect(worker).not_to receive(:re_enqueue)
+
+ expect { perform }.to raise_error(NotImplementedError)
+ end
+
+ it 'removes itself from the running set' do
+ expect(job_tracker).to receive(:remove)
+
+ expect { perform }.to raise_error(NotImplementedError)
+ end
+
+ it 'reports prometheus metrics' do
+ expect(worker).to receive(:report_prometheus_metrics)
+
+ expect { perform }.to raise_error(NotImplementedError)
+ end
+ end
+ end
+
+ describe '#remaining_capacity' do
+ subject(:remaining_capacity) { worker.remaining_capacity }
+
+ before do
+ expect(worker).to receive(:max_running_jobs).and_return(max_capacity)
+ end
+
+ context 'when changing the capacity to a lower value' do
+ let(:max_capacity) { -1 }
+
+ it { expect(remaining_capacity).to eq(0) }
+ end
+
+ context 'when registering new jobs' do
+ let(:max_capacity) { 2 }
+
+ before do
+ job_tracker.register('a-job-id')
+ end
+
+ it { expect(remaining_capacity).to eq(1) }
+ end
+
+ context 'with jobs in the queue' do
+ let(:max_capacity) { 2 }
+
+ before do
+ expect(worker_class).to receive(:queue_size).and_return(1)
+ end
+
+ it { expect(remaining_capacity).to eq(1) }
+ end
+
+ context 'with both running jobs and queued jobs' do
+ let(:max_capacity) { 10 }
+
+ before do
+ expect(worker_class).to receive(:queue_size).and_return(5)
+ expect(worker).to receive(:running_jobs_count).and_return(3)
+ end
+
+ it { expect(remaining_capacity).to eq(2) }
+ end
+ end
+
+ describe '#remove_failed_jobs' do
+ subject(:remove_failed_jobs) { worker.remove_failed_jobs }
+
+ before do
+ job_tracker.register('a-job-id')
+ allow(worker).to receive(:max_running_jobs).and_return(2)
+
+ expect(job_tracker).to receive(:clean_up).and_call_original
+ end
+
+ context 'with failed jobs' do
+ it 'update the available capacity' do
+ expect { remove_failed_jobs }.to change { worker.remaining_capacity }.by(1)
+ end
+ end
+ end
+
+ describe '#report_prometheus_metrics' do
+ subject(:report_prometheus_metrics) { worker.report_prometheus_metrics }
+
+ before do
+ allow(worker).to receive(:running_jobs_count).and_return(5)
+ allow(worker).to receive(:max_running_jobs).and_return(7)
+ allow(worker).to receive(:remaining_work_count).and_return(9)
+ end
+
+ it 'reports number of running jobs' do
+ labels = { worker: 'DummyWorker' }
+
+ report_prometheus_metrics
+
+ expect(Gitlab::Metrics.registry.get(:limited_capacity_worker_running_jobs).get(labels)).to eq(5)
+ expect(Gitlab::Metrics.registry.get(:limited_capacity_worker_max_running_jobs).get(labels)).to eq(7)
+ expect(Gitlab::Metrics.registry.get(:limited_capacity_worker_remaining_work_count).get(labels)).to eq(9)
+ end
+ end
+end
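
The spec above exercises the full LimitedCapacity::Worker contract: an including class must implement perform_work, remaining_work_count and max_running_jobs, retries and deduplication are disabled, and work is kicked off via the class-level perform_with_capacity (as Ci::ScheduleDeleteObjectsCronWorker does earlier in this diff). A hedged sketch of such a worker, purely for illustration and not taken from this commit:

    class DummyCleanupWorker  # hypothetical example class
      include ApplicationWorker
      include LimitedCapacity::Worker

      # One bounded slice of work; receives the arguments passed to perform_with_capacity.
      def perform_work(*args)
        # ...
      end

      # How much work is left; used to decide how many follow-up jobs to enqueue.
      def remaining_work_count(*args)
        0
      end

      # Upper bound on concurrently running jobs for this worker.
      def max_running_jobs
        5
      end
    end

    # Typically triggered from a cron worker:
    DummyCleanupWorker.perform_with_capacity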
diff --git a/spec/workers/container_expiration_policy_worker_spec.rb b/spec/workers/container_expiration_policy_worker_spec.rb
index 868eb6b192e..6b185c30670 100644
--- a/spec/workers/container_expiration_policy_worker_spec.rb
+++ b/spec/workers/container_expiration_policy_worker_spec.rb
@@ -7,19 +7,24 @@ RSpec.describe ContainerExpirationPolicyWorker do
subject { described_class.new.perform }
- context 'With no container expiration policies' do
- it 'Does not execute any policies' do
+ RSpec.shared_examples 'not executing any policy' do
+ it 'does not run any policy' do
expect(ContainerExpirationPolicyService).not_to receive(:new)
subject
end
end
+ context 'With no container expiration policies' do
+ it_behaves_like 'not executing any policy'
+ end
+
context 'With container expiration policies' do
- context 'a valid policy' do
- let!(:container_expiration_policy) { create(:container_expiration_policy, :runnable) }
- let(:user) { container_expiration_policy.project.owner }
+ let_it_be(:container_expiration_policy, reload: true) { create(:container_expiration_policy, :runnable) }
+ let_it_be(:container_repository) { create(:container_repository, project: container_expiration_policy.project) }
+ let_it_be(:user) { container_expiration_policy.project.owner }
+ context 'a valid policy' do
it 'runs the policy' do
service = instance_double(ContainerExpirationPolicyService, execute: true)
@@ -31,33 +36,30 @@ RSpec.describe ContainerExpirationPolicyWorker do
end
context 'a disabled policy' do
- let!(:container_expiration_policy) { create(:container_expiration_policy, :runnable, :disabled) }
- let(:user) {container_expiration_policy.project.owner }
-
- it 'does not run the policy' do
- expect(ContainerExpirationPolicyService)
- .not_to receive(:new).with(container_expiration_policy, user)
-
- subject
+ before do
+ container_expiration_policy.disable!
end
+
+ it_behaves_like 'not executing any policy'
end
context 'a policy that is not due for a run' do
- let!(:container_expiration_policy) { create(:container_expiration_policy) }
- let(:user) {container_expiration_policy.project.owner }
+ before do
+ container_expiration_policy.update_column(:next_run_at, 2.minutes.from_now)
+ end
- it 'does not run the policy' do
- expect(ContainerExpirationPolicyService)
- .not_to receive(:new).with(container_expiration_policy, user)
+ it_behaves_like 'not executing any policy'
+ end
- subject
+ context 'a policy linked to no container repository' do
+ before do
+ container_expiration_policy.container_repositories.delete_all
end
+
+ it_behaves_like 'not executing any policy'
end
context 'an invalid policy' do
- let_it_be(:container_expiration_policy) { create(:container_expiration_policy, :runnable) }
- let_it_be(:user) {container_expiration_policy.project.owner }
-
before do
container_expiration_policy.update_column(:name_regex, '*production')
end
diff --git a/spec/workers/deployments/drop_older_deployments_worker_spec.rb b/spec/workers/deployments/drop_older_deployments_worker_spec.rb
new file mode 100644
index 00000000000..0cf524ca16f
--- /dev/null
+++ b/spec/workers/deployments/drop_older_deployments_worker_spec.rb
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Deployments::DropOlderDeploymentsWorker do
+ subject { described_class.new.perform(deployment&.id) }
+
+ describe '#perform' do
+ let(:deployment) { create(:deployment, :success) }
+
+ it 'executes Deployments::OlderDeploymentsDropService' do
+ expect(Deployments::OlderDeploymentsDropService)
+ .to receive(:new).with(deployment.id).and_call_original
+
+ subject
+ end
+ end
+end
diff --git a/spec/workers/deployments/execute_hooks_worker_spec.rb b/spec/workers/deployments/execute_hooks_worker_spec.rb
new file mode 100644
index 00000000000..fb1dc8cf290
--- /dev/null
+++ b/spec/workers/deployments/execute_hooks_worker_spec.rb
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Deployments::ExecuteHooksWorker do
+ let(:worker) { described_class.new }
+
+ describe '#perform' do
+ before do
+ allow(ProjectServiceWorker).to receive(:perform_async)
+ end
+
+ it 'executes project services for deployment_hooks' do
+ deployment = create(:deployment, :running)
+ project = deployment.project
+ service = create(:service, type: 'SlackService', project: project, deployment_events: true, active: true)
+
+ expect(ProjectServiceWorker).to receive(:perform_async).with(service.id, an_instance_of(Hash))
+
+ worker.perform(deployment.id)
+ end
+
+ it 'does not execute an inactive service' do
+ deployment = create(:deployment, :running)
+ project = deployment.project
+ create(:service, type: 'SlackService', project: project, deployment_events: true, active: false)
+
+ expect(ProjectServiceWorker).not_to receive(:perform_async)
+
+ worker.perform(deployment.id)
+ end
+
+ it 'does not execute if a deployment does not exist' do
+ expect(ProjectServiceWorker).not_to receive(:perform_async)
+
+ worker.perform(non_existing_record_id)
+ end
+
+ it 'execute webhooks' do
+ deployment = create(:deployment, :running)
+ project = deployment.project
+ web_hook = create(:project_hook, deployment_events: true, project: project)
+
+ expect_next_instance_of(WebHookService, web_hook, an_instance_of(Hash), "deployment_hooks") do |service|
+ expect(service).to receive(:async_execute)
+ end
+
+ worker.perform(deployment.id)
+ end
+ end
+end
diff --git a/spec/workers/deployments/link_merge_request_worker_spec.rb b/spec/workers/deployments/link_merge_request_worker_spec.rb
new file mode 100644
index 00000000000..a55dd897bc7
--- /dev/null
+++ b/spec/workers/deployments/link_merge_request_worker_spec.rb
@@ -0,0 +1,71 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Deployments::LinkMergeRequestWorker do
+ subject(:worker) { described_class.new }
+
+ describe '#perform' do
+ it 'links merge requests to the deployment' do
+ deployment = create(:deployment)
+ service = instance_double(Deployments::LinkMergeRequestsService)
+
+ expect(Deployments::LinkMergeRequestsService)
+ .to receive(:new)
+ .with(deployment)
+ .and_return(service)
+
+ expect(service).to receive(:execute)
+
+ worker.perform(deployment.id)
+ end
+
+ it 'does not link merge requests when the deployment is not found' do
+ expect(Deployments::LinkMergeRequestsService).not_to receive(:new)
+
+ worker.perform(non_existing_record_id)
+ end
+ end
+
+ context 'idempotent' do
+ include_examples 'an idempotent worker' do
+ let(:project) { create(:project, :repository) }
+ let(:environment) { create(:environment, project: project) }
+ let(:deployment) { create(:deployment, :success, project: project, environment: environment) }
+ let(:job_args) { deployment.id }
+
+ it 'links merge requests to deployment' do
+ mr1 = create(
+ :merge_request,
+ :merged,
+ source_project: project,
+ target_project: project,
+ source_branch: 'source1',
+ target_branch: deployment.ref
+ )
+
+ mr2 = create(
+ :merge_request,
+ :merged,
+ source_project: project,
+ target_project: project,
+ source_branch: 'source2',
+ target_branch: deployment.ref
+ )
+
+ mr3 = create(
+ :merge_request,
+ :merged,
+ source_project: project,
+ target_project: project,
+ target_branch: 'foo'
+ )
+
+ subject
+
+ expect(deployment.merge_requests).to include(mr1, mr2)
+ expect(deployment.merge_requests).not_to include(mr3)
+ end
+ end
+ end
+end
diff --git a/spec/workers/deployments/success_worker_spec.rb b/spec/workers/deployments/success_worker_spec.rb
index 7c21a3147a7..d9996e66919 100644
--- a/spec/workers/deployments/success_worker_spec.rb
+++ b/spec/workers/deployments/success_worker_spec.rb
@@ -8,8 +8,8 @@ RSpec.describe Deployments::SuccessWorker do
context 'when successful deployment' do
let(:deployment) { create(:deployment, :success) }
- it 'executes Deployments::AfterCreateService' do
- expect(Deployments::AfterCreateService)
+ it 'executes Deployments::UpdateEnvironmentService' do
+ expect(Deployments::UpdateEnvironmentService)
.to receive(:new).with(deployment).and_call_original
subject
@@ -19,8 +19,8 @@ RSpec.describe Deployments::SuccessWorker do
context 'when canceled deployment' do
let(:deployment) { create(:deployment, :canceled) }
- it 'does not execute Deployments::AfterCreateService' do
- expect(Deployments::AfterCreateService).not_to receive(:new)
+ it 'does not execute Deployments::UpdateEnvironmentService' do
+ expect(Deployments::UpdateEnvironmentService).not_to receive(:new)
subject
end
@@ -29,8 +29,8 @@ RSpec.describe Deployments::SuccessWorker do
context 'when deploy record does not exist' do
let(:deployment) { nil }
- it 'does not execute Deployments::AfterCreateService' do
- expect(Deployments::AfterCreateService).not_to receive(:new)
+ it 'does not execute Deployments::UpdateEnvironmentService' do
+ expect(Deployments::UpdateEnvironmentService).not_to receive(:new)
subject
end
diff --git a/spec/workers/deployments/update_environment_worker_spec.rb b/spec/workers/deployments/update_environment_worker_spec.rb
new file mode 100644
index 00000000000..d67cbd62616
--- /dev/null
+++ b/spec/workers/deployments/update_environment_worker_spec.rb
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Deployments::UpdateEnvironmentWorker do
+ subject(:worker) { described_class.new }
+
+ context 'when successful deployment' do
+ let(:deployment) { create(:deployment, :success) }
+
+ it 'executes Deployments::UpdateEnvironmentService' do
+ service = instance_double(Deployments::UpdateEnvironmentService)
+
+ expect(Deployments::UpdateEnvironmentService)
+ .to receive(:new)
+ .with(deployment)
+ .and_return(service)
+
+ expect(service).to receive(:execute)
+
+ worker.perform(deployment.id)
+ end
+ end
+
+ context 'when canceled deployment' do
+ let(:deployment) { create(:deployment, :canceled) }
+
+ it 'does not execute Deployments::UpdateEnvironmentService' do
+ expect(Deployments::UpdateEnvironmentService).not_to receive(:new)
+
+ worker.perform(deployment.id)
+ end
+ end
+
+ context 'when deploy record does not exist' do
+ it 'does not execute Deployments::UpdateEnvironmentService' do
+ expect(Deployments::UpdateEnvironmentService).not_to receive(:new)
+
+ worker.perform(non_existing_record_id)
+ end
+ end
+
+ context 'idempotent' do
+ include_examples 'an idempotent worker' do
+ let(:project) { create(:project, :repository) }
+ let(:environment) { create(:environment, name: 'production') }
+ let(:deployment) { create(:deployment, :success, project: project, environment: environment) }
+ let(:merge_request) { create(:merge_request, target_branch: 'master', source_branch: 'feature', source_project: project) }
+ let(:job_args) { deployment.id }
+
+ before do
+ merge_request.metrics.update!(merged_at: 1.hour.ago)
+ end
+
+ it 'updates merge requests metrics' do
+ subject
+
+ expect(merge_request.reload.metrics.first_deployed_to_production_at)
+ .to be_like_time(deployment.finished_at)
+ end
+ end
+ end
+end
diff --git a/spec/workers/design_management/copy_design_collection_worker_spec.rb b/spec/workers/design_management/copy_design_collection_worker_spec.rb
new file mode 100644
index 00000000000..45bfc47ca7e
--- /dev/null
+++ b/spec/workers/design_management/copy_design_collection_worker_spec.rb
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe DesignManagement::CopyDesignCollectionWorker, :clean_gitlab_redis_shared_state do
+ describe '#perform' do
+ let_it_be(:user) { create(:user) }
+ let_it_be(:issue) { create(:issue) }
+ let_it_be(:target_issue) { create(:issue) }
+
+ subject { described_class.new.perform(user.id, issue.id, target_issue.id) }
+
+ it_behaves_like 'an idempotent worker' do
+ let(:job_args) { [user.id, issue.id, target_issue.id] }
+
+ specify { subject }
+ end
+
+ it 'calls DesignManagement::CopyDesignCollection::CopyService' do
+ expect_next_instance_of(DesignManagement::CopyDesignCollection::CopyService) do |service|
+ expect(service).to receive(:execute).and_return(ServiceResponse.success)
+ end
+
+ subject
+ end
+
+ it 'logs if there was an error calling the service' do
+ message = 'Error message'
+
+ allow_next_instance_of(DesignManagement::CopyDesignCollection::CopyService) do |service|
+ allow(service).to receive(:execute).and_return(ServiceResponse.error(message: message))
+ end
+
+ expect(Gitlab::AppLogger).to receive(:warn).with(message)
+
+ subject
+ end
+ end
+end
diff --git a/spec/workers/design_management/new_version_worker_spec.rb b/spec/workers/design_management/new_version_worker_spec.rb
index 4d57c46487e..3320d7a062d 100644
--- a/spec/workers/design_management/new_version_worker_spec.rb
+++ b/spec/workers/design_management/new_version_worker_spec.rb
@@ -36,6 +36,10 @@ RSpec.describe DesignManagement::NewVersionWorker do
expect { worker.perform(version.id) }.to change { Note.system.count }.by(1)
end
+ it 'does not create a system note if skip_system_notes is true' do
+ expect { worker.perform(version.id, true) }.not_to change { Note.system.count }
+ end
+
it 'invokes GenerateImageVersionsService' do
expect_next_instance_of(DesignManagement::GenerateImageVersionsService) do |service|
expect(service).to receive(:execute)
diff --git a/spec/workers/disallow_two_factor_for_group_worker_spec.rb b/spec/workers/disallow_two_factor_for_group_worker_spec.rb
new file mode 100644
index 00000000000..a69dd893f81
--- /dev/null
+++ b/spec/workers/disallow_two_factor_for_group_worker_spec.rb
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe DisallowTwoFactorForGroupWorker do
+ let_it_be(:group) { create(:group, require_two_factor_authentication: true) }
+ let_it_be(:user) { create(:user, require_two_factor_authentication_from_group: true) }
+
+ it "updates group" do
+ described_class.new.perform(group.id)
+
+ expect(group.reload.require_two_factor_authentication).to eq(false)
+ end
+
+ it "updates group members" do
+ group.add_user(user, GroupMember::DEVELOPER)
+
+ described_class.new.perform(group.id)
+
+ expect(user.reload.require_two_factor_authentication_from_group).to eq(false)
+ end
+end
diff --git a/spec/workers/disallow_two_factor_for_subgroups_worker_spec.rb b/spec/workers/disallow_two_factor_for_subgroups_worker_spec.rb
new file mode 100644
index 00000000000..c3be8263171
--- /dev/null
+++ b/spec/workers/disallow_two_factor_for_subgroups_worker_spec.rb
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe DisallowTwoFactorForSubgroupsWorker do
+ let_it_be(:group) { create(:group) }
+ let_it_be(:subgroup_with_2fa) { create(:group, parent: group, require_two_factor_authentication: true) }
+ let_it_be(:subgroup_without_2fa) { create(:group, parent: group, require_two_factor_authentication: false) }
+ let_it_be(:subsubgroup_with_2fa) { create(:group, parent: subgroup_with_2fa, require_two_factor_authentication: true) }
+
+ it "schedules updating subgroups" do
+ expect(DisallowTwoFactorForGroupWorker).to receive(:perform_in).with(0, subgroup_with_2fa.id)
+ expect(DisallowTwoFactorForGroupWorker).to receive(:perform_in).with(2, subsubgroup_with_2fa.id)
+
+ described_class.new.perform(group.id)
+ end
+end
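
The expectation above implies that the subgroups worker fans out one DisallowTwoFactorForGroupWorker job per descendant group that still requires 2FA, spacing the jobs two seconds apart. Purely as an illustration, assuming a descendants scope on Group (the worker body is not in this diff):

    # Hypothetical fan-out matching the 0s and 2s offsets expected above.
    group.descendants.where(require_two_factor_authentication: true)
         .find_each.with_index do |subgroup, index|
      DisallowTwoFactorForGroupWorker.perform_in(index * 2.seconds, subgroup.id)
    end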
diff --git a/spec/workers/export_csv_worker_spec.rb b/spec/workers/export_csv_worker_spec.rb
index 1a5b17ee35b..88ccfac0a02 100644
--- a/spec/workers/export_csv_worker_spec.rb
+++ b/spec/workers/export_csv_worker_spec.rb
@@ -10,25 +10,9 @@ RSpec.describe ExportCsvWorker do
described_class.new.perform(user.id, project.id, params)
end
- it 'emails a CSV' do
- expect {perform}.to change(ActionMailer::Base.deliveries, :size).by(1)
- end
-
- it 'ensures that project_id is passed to issues_finder' do
- expect(IssuesFinder).to receive(:new).with(anything, hash_including(project_id: project.id)).and_call_original
+ it 'delegates call to IssuableExportCsvWorker' do
+ expect(IssuableExportCsvWorker).to receive(:perform_async).with(:issue, user.id, project.id, anything)
perform
end
-
- it 'removes sort parameter' do
- expect(IssuesFinder).to receive(:new).with(anything, hash_not_including(:sort)).and_call_original
-
- perform
- end
-
- it 'converts controller string keys to symbol keys for IssuesFinder' do
- expect(IssuesFinder).to receive(:new).with(anything, hash_including(test_key: true)).and_call_original
-
- perform('test_key' => true)
- end
end
diff --git a/spec/workers/git_garbage_collect_worker_spec.rb b/spec/workers/git_garbage_collect_worker_spec.rb
index 1be6e86b650..fc9115a5ea1 100644
--- a/spec/workers/git_garbage_collect_worker_spec.rb
+++ b/spec/workers/git_garbage_collect_worker_spec.rb
@@ -56,7 +56,7 @@ RSpec.describe GitGarbageCollectWorker do
it "flushes ref caches when the task if 'gc'" do
expect(subject).to receive(:renew_lease).with(lease_key, lease_uuid).and_call_original
- expect_any_instance_of(Repository).to receive(:after_create_branch).and_call_original
+ expect_any_instance_of(Repository).to receive(:expire_branches_cache).and_call_original
expect_any_instance_of(Repository).to receive(:branch_names).and_call_original
expect_any_instance_of(Repository).to receive(:has_visible_content?).and_call_original
expect_any_instance_of(Gitlab::Git::Repository).to receive(:has_visible_content?).and_call_original
@@ -77,7 +77,7 @@ RSpec.describe GitGarbageCollectWorker do
end
it 'returns silently' do
- expect_any_instance_of(Repository).not_to receive(:after_create_branch).and_call_original
+ expect_any_instance_of(Repository).not_to receive(:expire_branches_cache).and_call_original
expect_any_instance_of(Repository).not_to receive(:branch_names).and_call_original
expect_any_instance_of(Repository).not_to receive(:has_visible_content?).and_call_original
@@ -102,7 +102,7 @@ RSpec.describe GitGarbageCollectWorker do
it "flushes ref caches when the task if 'gc'" do
expect(subject).to receive(:get_lease_uuid).with("git_gc:#{task}:#{project.id}").and_return(false)
- expect_any_instance_of(Repository).to receive(:after_create_branch).and_call_original
+ expect_any_instance_of(Repository).to receive(:expire_branches_cache).and_call_original
expect_any_instance_of(Repository).to receive(:branch_names).and_call_original
expect_any_instance_of(Repository).to receive(:has_visible_content?).and_call_original
expect_any_instance_of(Gitlab::Git::Repository).to receive(:has_visible_content?).and_call_original
@@ -129,46 +129,36 @@ RSpec.describe GitGarbageCollectWorker do
let_it_be(:lfs_reference) { create(:lfs_objects_project, project: project) }
let(:lfs_object) { lfs_reference.lfs_object }
- context 'with cleanup_lfs_during_gc feature flag enabled' do
- before do
- stub_feature_flags(cleanup_lfs_during_gc: true)
+ it 'cleans up unreferenced LFS objects' do
+ expect_next_instance_of(Gitlab::Cleanup::OrphanLfsFileReferences) do |svc|
+ expect(svc.project).to eq(project)
+ expect(svc.dry_run).to be_falsy
+ expect(svc).to receive(:run!).and_call_original
end
- it 'cleans up unreferenced LFS objects' do
- expect_next_instance_of(Gitlab::Cleanup::OrphanLfsFileReferences) do |svc|
- expect(svc.project).to eq(project)
- expect(svc.dry_run).to be_falsy
- expect(svc).to receive(:run!).and_call_original
- end
-
- subject.perform(*params)
-
- expect(project.lfs_objects.reload).not_to include(lfs_object)
- end
+ subject.perform(*params)
- it 'does nothing if the database is read-only' do
- allow(Gitlab::Database).to receive(:read_only?) { true }
+ expect(project.lfs_objects.reload).not_to include(lfs_object)
+ end
- expect_any_instance_of(Gitlab::Cleanup::OrphanLfsFileReferences).not_to receive(:run!)
+ it 'catches and logs exceptions' do
+ expect_any_instance_of(Gitlab::Cleanup::OrphanLfsFileReferences)
+ .to receive(:run!)
+ .and_raise(/Failed/)
- subject.perform(*params)
+ expect(Gitlab::GitLogger).to receive(:warn)
+ expect(Gitlab::ErrorTracking).to receive(:track_and_raise_for_dev_exception)
- expect(project.lfs_objects.reload).to include(lfs_object)
- end
+ subject.perform(*params)
end
- context 'with cleanup_lfs_during_gc feature flag disabled' do
- before do
- stub_feature_flags(cleanup_lfs_during_gc: false)
- end
-
- it 'does not clean up unreferenced LFS objects' do
- expect_any_instance_of(Gitlab::Cleanup::OrphanLfsFileReferences).not_to receive(:run!)
+ it 'does nothing if the database is read-only' do
+ allow(Gitlab::Database).to receive(:read_only?) { true }
+ expect_any_instance_of(Gitlab::Cleanup::OrphanLfsFileReferences).not_to receive(:run!)
- subject.perform(*params)
+ subject.perform(*params)
- expect(project.lfs_objects.reload).to include(lfs_object)
- end
+ expect(project.lfs_objects.reload).to include(lfs_object)
end
end
end
@@ -180,7 +170,7 @@ RSpec.describe GitGarbageCollectWorker do
it 'returns silently' do
expect(subject).not_to receive(:command)
- expect_any_instance_of(Repository).not_to receive(:after_create_branch).and_call_original
+ expect_any_instance_of(Repository).not_to receive(:expire_branches_cache).and_call_original
expect_any_instance_of(Repository).not_to receive(:branch_names).and_call_original
expect_any_instance_of(Repository).not_to receive(:has_visible_content?).and_call_original
diff --git a/spec/workers/group_export_worker_spec.rb b/spec/workers/group_export_worker_spec.rb
index 5697e66b7d1..4e58e3886a4 100644
--- a/spec/workers/group_export_worker_spec.rb
+++ b/spec/workers/group_export_worker_spec.rb
@@ -26,4 +26,14 @@ RSpec.describe GroupExportWorker do
end
end
end
+
+ describe 'sidekiq options' do
+ it 'disables retry' do
+ expect(described_class.sidekiq_options['retry']).to eq(false)
+ end
+
+ it 'disables dead' do
+ expect(described_class.sidekiq_options['dead']).to eq(false)
+ end
+ end
end
diff --git a/spec/workers/group_import_worker_spec.rb b/spec/workers/group_import_worker_spec.rb
index fb2d49c21af..5171de7086b 100644
--- a/spec/workers/group_import_worker_spec.rb
+++ b/spec/workers/group_import_worker_spec.rb
@@ -3,17 +3,29 @@
require 'spec_helper'
RSpec.describe GroupImportWorker do
- let!(:user) { create(:user) }
- let!(:group) { create(:group) }
+ let(:user) { create(:user) }
+ let(:group) { create(:group) }
subject { described_class.new }
before do
+ create(:group_import_state, group: group, user: user)
+
allow_next_instance_of(described_class) do |job|
allow(job).to receive(:jid).and_return(SecureRandom.hex(8))
end
end
+ describe 'sidekiq options' do
+ it 'disables retry' do
+ expect(described_class.sidekiq_options['retry']).to eq(false)
+ end
+
+ it 'disables dead' do
+ expect(described_class.sidekiq_options['dead']).to eq(false)
+ end
+ end
+
describe '#perform' do
context 'when it succeeds' do
before do
@@ -26,44 +38,11 @@ RSpec.describe GroupImportWorker do
subject.perform(user.id, group.id)
end
- context 'when the import state does not exist' do
- it 'creates group import' do
- expect(group.import_state).to be_nil
-
- subject.perform(user.id, group.id)
- import_state = group.reload.import_state
-
- expect(import_state).to be_instance_of(GroupImportState)
- expect(import_state.status_name).to eq(:finished)
- expect(import_state.jid).not_to be_empty
- end
-
- it 'sets the group import status to started' do
- expect_next_instance_of(GroupImportState) do |import|
- expect(import).to receive(:start!).and_call_original
- end
-
- subject.perform(user.id, group.id)
- end
-
- it 'sets the group import status to finished' do
- expect_next_instance_of(GroupImportState) do |import|
- expect(import).to receive(:finish!).and_call_original
- end
-
- subject.perform(user.id, group.id)
- end
- end
-
- context 'when the import state already exists' do
- it 'updates the existing state' do
- existing_state = create(:group_import_state, group: group)
-
- expect { subject.perform(user.id, group.id) }
- .not_to change { GroupImportState.count }
+ it 'updates the existing state' do
+ expect { subject.perform(user.id, group.id) }
+ .not_to change { GroupImportState.count }
- expect(existing_state.reload).to be_finished
- end
+ expect(group.import_state.reload).to be_finished
end
end
@@ -83,11 +62,9 @@ RSpec.describe GroupImportWorker do
end
it 'sets the group import status to failed' do
- expect_next_instance_of(GroupImportState) do |import|
- expect(import).to receive(:fail_op).and_call_original
- end
-
expect { subject.perform(user.id, group.id) }.to raise_exception(Gitlab::ImportExport::Error)
+
+ expect(group.import_state.reload.status).to eq(-1)
end
end
end
diff --git a/spec/workers/incident_management/add_severity_system_note_worker_spec.rb b/spec/workers/incident_management/add_severity_system_note_worker_spec.rb
new file mode 100644
index 00000000000..203c62ffe6f
--- /dev/null
+++ b/spec/workers/incident_management/add_severity_system_note_worker_spec.rb
@@ -0,0 +1,60 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe IncidentManagement::AddSeveritySystemNoteWorker do
+ let_it_be(:project) { create(:project) }
+ let_it_be(:user) { create(:user) }
+ let_it_be(:incident) { create(:incident, project: project) }
+ let_it_be(:issuable_severity) { create(:issuable_severity, issue: incident, severity: :medium) }
+
+ describe '#perform' do
+ let(:incident_id) { incident.id }
+ let(:user_id) { user.id }
+
+ subject(:perform) { described_class.new.perform(incident_id, user_id) }
+
+ shared_examples 'does not add a system note' do
+ it 'does not change incident notes count' do
+ expect { perform }.not_to change { incident.notes.count }
+ end
+ end
+
+ context 'when incident and user exist' do
+ it 'creates a system note' do
+ expect { perform }.to change { incident.notes.where(author: user).count }.by(1)
+ end
+ end
+
+ context 'when incident does not exist' do
+ let(:incident_id) { -1 }
+
+ it_behaves_like 'does not add a system note'
+ end
+
+ context 'when incident_id is nil' do
+ let(:incident_id) { nil }
+
+ it_behaves_like 'does not add a system note'
+ end
+
+ context 'when issue is not an incident' do
+ let_it_be(:issue) { create(:issue, project: project) }
+ let(:incident_id) { issue.id }
+
+ it_behaves_like 'does not add a system note'
+ end
+
+ context 'when user does not exist' do
+ let(:user_id) { -1 }
+
+ it_behaves_like 'does not add a system note'
+ end
+
+ context 'when user_id is nil' do
+ let(:user_id) { nil }
+
+ it_behaves_like 'does not add a system note'
+ end
+ end
+end
diff --git a/spec/workers/incident_management/process_alert_worker_spec.rb b/spec/workers/incident_management/process_alert_worker_spec.rb
index 20ab283b49b..41d4f31da24 100644
--- a/spec/workers/incident_management/process_alert_worker_spec.rb
+++ b/spec/workers/incident_management/process_alert_worker_spec.rb
@@ -9,7 +9,6 @@ RSpec.describe IncidentManagement::ProcessAlertWorker do
describe '#perform' do
let_it_be(:started_at) { Time.now.rfc3339 }
let_it_be(:payload) { { 'title' => 'title', 'start_time' => started_at } }
- let_it_be(:parsed_payload) { Gitlab::Alerting::NotificationPayloadParser.call(payload, project) }
let_it_be(:alert) { create(:alert_management_alert, project: project, payload: payload, started_at: started_at) }
let(:created_issue) { Issue.last! }
@@ -68,7 +67,6 @@ RSpec.describe IncidentManagement::ProcessAlertWorker do
context 'prometheus alert' do
let_it_be(:alert) { create(:alert_management_alert, :prometheus, project: project, started_at: started_at) }
- let_it_be(:parsed_payload) { alert.payload }
it_behaves_like 'creates issue successfully'
end
diff --git a/spec/workers/incident_management/process_prometheus_alert_worker_spec.rb b/spec/workers/incident_management/process_prometheus_alert_worker_spec.rb
index c294892a66f..2ca4193aa72 100644
--- a/spec/workers/incident_management/process_prometheus_alert_worker_spec.rb
+++ b/spec/workers/incident_management/process_prometheus_alert_worker_spec.rb
@@ -6,7 +6,7 @@ RSpec.describe IncidentManagement::ProcessPrometheusAlertWorker do
describe '#perform' do
let_it_be(:project) { create(:project) }
let_it_be(:prometheus_alert) { create(:prometheus_alert, project: project) }
- let(:payload_key) { Gitlab::Alerting::Alert.new(project: project, payload: alert_params).gitlab_fingerprint }
+ let(:payload_key) { Gitlab::AlertManagement::Payload::Prometheus.new(project: project, payload: alert_params).gitlab_fingerprint }
let!(:prometheus_alert_event) { create(:prometheus_alert_event, prometheus_alert: prometheus_alert, payload_key: payload_key) }
let!(:settings) { create(:project_incident_management_setting, project: project, create_issue: true) }
diff --git a/spec/workers/issuable_export_csv_worker_spec.rb b/spec/workers/issuable_export_csv_worker_spec.rb
new file mode 100644
index 00000000000..bcc2420996d
--- /dev/null
+++ b/spec/workers/issuable_export_csv_worker_spec.rb
@@ -0,0 +1,73 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe IssuableExportCsvWorker do
+ let(:user) { create(:user) }
+ let(:project) { create(:project, creator: user) }
+ let(:params) { {} }
+
+ subject { described_class.new.perform(issuable_type, user.id, project.id, params) }
+
+ context 'when issuable type is Issue' do
+ let(:issuable_type) { :issue }
+
+ it 'emails a CSV' do
+ expect { subject }.to change(ActionMailer::Base.deliveries, :size).by(1)
+ end
+
+ it 'ensures that project_id is passed to issues_finder' do
+ expect(IssuesFinder).to receive(:new).with(anything, hash_including(project_id: project.id)).and_call_original
+
+ subject
+ end
+
+ it 'removes sort parameter' do
+ expect(IssuesFinder).to receive(:new).with(anything, hash_not_including(:sort)).and_call_original
+
+ subject
+ end
+
+ it 'calls the issue export service' do
+ expect(Issues::ExportCsvService).to receive(:new).once.and_call_original
+
+ subject
+ end
+
+ context 'with params' do
+ let(:params) { { 'test_key' => true } }
+
+ it 'converts controller string keys to symbol keys for IssuesFinder' do
+ expect(IssuesFinder).to receive(:new).with(user, hash_including(test_key: true)).and_call_original
+
+ subject
+ end
+ end
+ end
+
+ context 'when issuable type is MergeRequest' do
+ let(:issuable_type) { :merge_request }
+
+ it 'emails a CSV' do
+ expect { subject }.to change(ActionMailer::Base.deliveries, :size).by(1)
+ end
+
+ it 'calls the MR export service' do
+ expect(MergeRequests::ExportCsvService).to receive(:new).with(anything, project).once.and_call_original
+
+ subject
+ end
+
+ it 'calls the MergeRequest finder' do
+ expect(MergeRequestsFinder).to receive(:new).once.and_call_original
+
+ subject
+ end
+ end
+
+ context 'when issuable type is User' do
+ let(:issuable_type) { :user }
+
+ it { expect { subject }.to raise_error(ArgumentError) }
+ end
+end
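
The new IssuableExportCsvWorker spec pins down a dispatch on the issuable type: :issue goes through IssuesFinder and Issues::ExportCsvService, :merge_request through MergeRequestsFinder and MergeRequests::ExportCsvService, and any other type raises ArgumentError. A rough skeleton of that dispatch, for orientation only (the worker itself is outside this diff, and parameter handling such as symbolised keys and the dropped :sort key is elided):

    # Hypothetical skeleton consistent with the spec above.
    def perform(type, user_id, project_id, params = {})
      case type.to_sym
      when :issue
        # IssuesFinder + Issues::ExportCsvService, then email the CSV to the user
      when :merge_request
        # MergeRequestsFinder + MergeRequests::ExportCsvService, then email the CSV
      else
        raise ArgumentError, "Unsupported issuable type: #{type}"
      end
    end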
diff --git a/spec/workers/member_invitation_reminder_emails_worker_spec.rb b/spec/workers/member_invitation_reminder_emails_worker_spec.rb
new file mode 100644
index 00000000000..bfd08792c7c
--- /dev/null
+++ b/spec/workers/member_invitation_reminder_emails_worker_spec.rb
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe MemberInvitationReminderEmailsWorker do
+ describe '#perform' do
+ subject { described_class.new.perform }
+
+ before do
+ create(:group_member, :invited, created_at: 2.days.ago)
+ end
+
+ context 'feature flag disabled' do
+ before do
+ stub_experiment(invitation_reminders: false)
+ end
+
+ it 'does not attempt to execute the invitation reminder service' do
+ expect(Members::InvitationReminderEmailService).not_to receive(:new)
+
+ subject
+ end
+ end
+
+ context 'feature flag enabled' do
+ before do
+ stub_experiment(invitation_reminders: true)
+ end
+
+ it 'executes the invitation reminder email service' do
+ expect_next_instance_of(Members::InvitationReminderEmailService) do |service|
+ expect(service).to receive(:execute)
+ end
+
+ subject
+ end
+ end
+ end
+end
diff --git a/spec/workers/metrics/dashboard/prune_old_annotations_worker_spec.rb b/spec/workers/metrics/dashboard/prune_old_annotations_worker_spec.rb
index d93612afe37..11343f69d6f 100644
--- a/spec/workers/metrics/dashboard/prune_old_annotations_worker_spec.rb
+++ b/spec/workers/metrics/dashboard/prune_old_annotations_worker_spec.rb
@@ -17,7 +17,7 @@ RSpec.describe Metrics::Dashboard::PruneOldAnnotationsWorker do
# is idempotent in the scope of 24h
expect { described_class.new.perform }.not_to change { Metrics::Dashboard::Annotation.all.to_a }
- Timecop.travel(24.hours.from_now) do
+ travel_to(24.hours.from_now) do
described_class.new.perform
expect(Metrics::Dashboard::Annotation.all).to match_array([one_day_old_annotation])
end
diff --git a/spec/workers/metrics/dashboard/sync_dashboards_worker_spec.rb b/spec/workers/metrics/dashboard/sync_dashboards_worker_spec.rb
new file mode 100644
index 00000000000..19b79835825
--- /dev/null
+++ b/spec/workers/metrics/dashboard/sync_dashboards_worker_spec.rb
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Metrics::Dashboard::SyncDashboardsWorker do
+ include MetricsDashboardHelpers
+ subject(:worker) { described_class.new }
+
+ let(:project) { project_with_dashboard(dashboard_path) }
+ let(:dashboard_path) { '.gitlab/dashboards/test.yml' }
+
+ describe ".perform" do
+ it 'imports metrics' do
+ expect { worker.perform(project.id) }.to change(PrometheusMetric, :count).by(3)
+ end
+
+ it 'is idempotent' do
+ 2.times do
+ worker.perform(project.id)
+ end
+
+ expect(PrometheusMetric.count).to eq(3)
+ end
+ end
+end
diff --git a/spec/workers/post_receive_spec.rb b/spec/workers/post_receive_spec.rb
index 50d164d1705..77c1d16428f 100644
--- a/spec/workers/post_receive_spec.rb
+++ b/spec/workers/post_receive_spec.rb
@@ -281,7 +281,7 @@ RSpec.describe PostReceive do
before do
# Need to mock here so we can expect calls on project
- allow(Gitlab::GlRepository).to receive(:parse).and_return([project, project, Gitlab::GlRepository::WIKI])
+ allow(Gitlab::GlRepository).to receive(:parse).and_return([project.wiki, project, Gitlab::GlRepository::WIKI])
end
it 'updates project activity' do
@@ -290,7 +290,7 @@ RSpec.describe PostReceive do
# MySQL drops milliseconds in the timestamps, so advance at least
# a second to ensure we see changes.
- Timecop.freeze(1.second.from_now) do
+ travel_to(1.second.from_now) do
expect do
perform
project.reload
diff --git a/spec/workers/project_export_worker_spec.rb b/spec/workers/project_export_worker_spec.rb
index 1f54b6766a4..defecefc3cc 100644
--- a/spec/workers/project_export_worker_spec.rb
+++ b/spec/workers/project_export_worker_spec.rb
@@ -75,6 +75,10 @@ RSpec.describe ProjectExportWorker do
expect(described_class.sidekiq_options['retry']).to eq(false)
end
+ it 'disables dead' do
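+ # With dead: false, a failed export job is not moved to Sidekiq's dead set.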
+ expect(described_class.sidekiq_options['dead']).to eq(false)
+ end
+
it 'sets default status expiration' do
expect(described_class.sidekiq_options['status_expiration']).to eq(StuckExportJobsWorker::EXPORT_JOBS_EXPIRATION)
end
diff --git a/spec/workers/propagate_integration_group_worker_spec.rb b/spec/workers/propagate_integration_group_worker_spec.rb
new file mode 100644
index 00000000000..fbf1fbf1fea
--- /dev/null
+++ b/spec/workers/propagate_integration_group_worker_spec.rb
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe PropagateIntegrationGroupWorker do
+ describe '#perform' do
+ let_it_be(:group) { create(:group) }
+ let_it_be(:another_group) { create(:group) }
+ let_it_be(:subgroup1) { create(:group, parent: group) }
+ let_it_be(:subgroup2) { create(:group, parent: group) }
+ let_it_be(:integration) { create(:redmine_service, :instance) }
+ let(:job_args) { [integration.id, group.id, subgroup2.id] }
+
+ it_behaves_like 'an idempotent worker' do
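+ # The 'an idempotent worker' shared example performs the job twice, which
+ # is why the service is expected to be instantiated .twice below.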
+ it 'calls BulkCreateIntegrationService' do
+ expect(BulkCreateIntegrationService).to receive(:new)
+ .with(integration, match_array([group, another_group, subgroup1, subgroup2]), 'group').twice
+ .and_return(double(execute: nil))
+
+ subject
+ end
+
+ context 'with a group integration' do
+ let_it_be(:integration) { create(:redmine_service, group: group, project: nil) }
+
+ it 'calls BulkCreateIntegrationService' do
+ expect(BulkCreateIntegrationService).to receive(:new)
+ .with(integration, match_array([subgroup1, subgroup2]), 'group').twice
+ .and_return(double(execute: nil))
+
+ subject
+ end
+ end
+ end
+
+ context 'with an invalid integration id' do
+ it 'returns without failure' do
+ expect(BulkCreateIntegrationService).not_to receive(:new)
+
+ subject.perform(0, 1, 100)
+ end
+ end
+ end
+end
diff --git a/spec/workers/propagate_integration_inherit_worker_spec.rb b/spec/workers/propagate_integration_inherit_worker_spec.rb
new file mode 100644
index 00000000000..cbfee29a6a0
--- /dev/null
+++ b/spec/workers/propagate_integration_inherit_worker_spec.rb
@@ -0,0 +1,32 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe PropagateIntegrationInheritWorker do
+ describe '#perform' do
+ let_it_be(:integration) { create(:redmine_service, :instance) }
+ let_it_be(:integration1) { create(:redmine_service, inherit_from_id: integration.id) }
+ let_it_be(:integration2) { create(:bugzilla_service, inherit_from_id: integration.id) }
+ let_it_be(:integration3) { create(:redmine_service) }
+
+ it_behaves_like 'an idempotent worker' do
+ let(:job_args) { [integration.id, integration1.id, integration3.id] }
+
+ it 'calls BulkUpdateIntegrationService' do
+ expect(BulkUpdateIntegrationService).to receive(:new)
+ .with(integration, match_array([integration1])).twice
+ .and_return(double(execute: nil))
+
+ subject
+ end
+ end
+
+ context 'with an invalid integration id' do
+ it 'returns without failure' do
+ expect(BulkUpdateIntegrationService).not_to receive(:new)
+
+ subject.perform(0, integration1.id, integration3.id)
+ end
+ end
+ end
+end
diff --git a/spec/workers/propagate_integration_project_worker_spec.rb b/spec/workers/propagate_integration_project_worker_spec.rb
new file mode 100644
index 00000000000..0302af2acc9
--- /dev/null
+++ b/spec/workers/propagate_integration_project_worker_spec.rb
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe PropagateIntegrationProjectWorker do
+ describe '#perform' do
+ let_it_be(:group) { create(:group) }
+ let_it_be(:project1) { create(:project) }
+ let_it_be(:project2) { create(:project, group: group) }
+ let_it_be(:project3) { create(:project, group: group) }
+ let_it_be(:integration) { create(:redmine_service, :instance) }
+ let(:job_args) { [integration.id, project1.id, project3.id] }
+
+ it_behaves_like 'an idempotent worker' do
+ it 'calls BulkCreateIntegrationService' do
+ expect(BulkCreateIntegrationService).to receive(:new)
+ .with(integration, match_array([project1, project2, project3]), 'project').twice
+ .and_return(double(execute: nil))
+
+ subject
+ end
+
+ context 'with a group integration' do
+ let_it_be(:integration) { create(:redmine_service, group: group, project: nil) }
+
+ it 'calls BulkCreateIntegrationService' do
+ expect(BulkCreateIntegrationService).to receive(:new)
+ .with(integration, match_array([project2, project3]), 'project').twice
+ .and_return(double(execute: nil))
+
+ subject
+ end
+ end
+ end
+
+ context 'with an invalid integration id' do
+ it 'returns without failure' do
+ expect(BulkCreateIntegrationService).not_to receive(:new)
+
+ subject.perform(0, 1, 100)
+ end
+ end
+ end
+end
diff --git a/spec/workers/web_hooks/destroy_worker_spec.rb b/spec/workers/web_hooks/destroy_worker_spec.rb
new file mode 100644
index 00000000000..fd26c8591ee
--- /dev/null
+++ b/spec/workers/web_hooks/destroy_worker_spec.rb
@@ -0,0 +1,59 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe WebHooks::DestroyWorker do
+ let_it_be(:project) { create(:project) }
+ let_it_be(:user) { create(:user) }
+
+ before_all do
+ project.add_maintainer(user)
+ end
+
+ subject { described_class.new }
+
+ describe "#perform" do
+ context 'with a Web hook' do
+ let!(:hook) { create(:project_hook, project: project) }
+ let!(:other_hook) { create(:project_hook, project: project) }
+ let!(:log) { create(:web_hook_log, web_hook: hook) }
+ let!(:other_log) { create(:web_hook_log, web_hook: other_hook) }
+
+ it "deletes the Web hook and logs", :aggregate_failures do
+ expect { subject.perform(user.id, hook.id) }
+ .to change { WebHookLog.count }.from(2).to(1)
+ .and change { WebHook.count }.from(2).to(1)
+
+ expect(WebHook.find(other_hook.id)).to be_present
+ expect(WebHookLog.find(other_log.id)).to be_present
+ end
+
+ it "raises and tracks an error if destroy failed" do
+ allow_next_instance_of(::WebHooks::DestroyService) do |instance|
+ expect(instance).to receive(:sync_destroy).with(anything).and_return({ status: :error, message: "failed" })
+ end
+
+ expect(Gitlab::ErrorTracking).to receive(:track_exception)
+ .with(an_instance_of(::WebHooks::DestroyService::DestroyError), web_hook_id: hook.id)
+ .and_call_original
+ expect { subject.perform(user.id, hook.id) }.to raise_error(::WebHooks::DestroyService::DestroyError)
+ end
+
+ context 'with unknown hook' do
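+ # non_existing_record_id is a spec helper returning an id that no record uses.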
+ it 'does not raise an error' do
+ expect { subject.perform(user.id, non_existing_record_id) }.not_to raise_error
+
+ expect(WebHook.count).to eq(2)
+ end
+ end
+
+ context 'with unknown user' do
+ it 'does not raise an error' do
+ expect { subject.perform(non_existing_record_id, hook.id) }.not_to raise_error
+
+ expect(WebHook.count).to eq(2)
+ end
+ end
+ end
+ end
+end