diff options
author | GitLab Bot <gitlab-bot@gitlab.com> | 2022-04-20 10:00:54 +0000 |
---|---|---|
committer | GitLab Bot <gitlab-bot@gitlab.com> | 2022-04-20 10:00:54 +0000 |
commit | 3cccd102ba543e02725d247893729e5c73b38295 (patch) | |
tree | f36a04ec38517f5deaaacb5acc7d949688d1e187 /spec/workers | |
parent | 205943281328046ef7b4528031b90fbda70c75ac (diff) | |
download | gitlab-ce-3cccd102ba543e02725d247893729e5c73b38295.tar.gz |
Add latest changes from gitlab-org/gitlab@14-10-stable-eev14.10.0-rc42
Diffstat (limited to 'spec/workers')
27 files changed, 683 insertions, 224 deletions
diff --git a/spec/workers/bulk_import_worker_spec.rb b/spec/workers/bulk_import_worker_spec.rb index 12e29573156..7e301efe708 100644 --- a/spec/workers/bulk_import_worker_spec.rb +++ b/spec/workers/bulk_import_worker_spec.rb @@ -56,17 +56,6 @@ RSpec.describe BulkImportWorker do end end - context 'when maximum allowed number of import entities in progress' do - it 'reenqueues itself' do - bulk_import = create(:bulk_import, :started) - (described_class::DEFAULT_BATCH_SIZE + 1).times { |_| create(:bulk_import_entity, :started, bulk_import: bulk_import) } - - expect(described_class).to receive(:perform_in).with(described_class::PERFORM_DELAY, bulk_import.id) - - subject.perform(bulk_import.id) - end - end - context 'when bulk import is created' do it 'marks bulk import as started' do bulk_import = create(:bulk_import, :created) @@ -84,7 +73,7 @@ RSpec.describe BulkImportWorker do expect { subject.perform(bulk_import.id) } .to change(BulkImports::Tracker, :count) - .by(BulkImports::Groups::Stage.new(bulk_import).pipelines.size * 2) + .by(BulkImports::Groups::Stage.new(entity_1).pipelines.size * 2) expect(entity_1.trackers).not_to be_empty expect(entity_2.trackers).not_to be_empty @@ -93,21 +82,17 @@ RSpec.describe BulkImportWorker do context 'when there are created entities to process' do let_it_be(:bulk_import) { create(:bulk_import, :created) } - before do - stub_const("#{described_class}::DEFAULT_BATCH_SIZE", 1) - end - - it 'marks a batch of entities as started, enqueues EntityWorker, ExportRequestWorker and reenqueues' do + it 'marks all entities as started, enqueues EntityWorker, ExportRequestWorker and reenqueues' do create(:bulk_import_entity, :created, bulk_import: bulk_import) create(:bulk_import_entity, :created, bulk_import: bulk_import) expect(described_class).to receive(:perform_in).with(described_class::PERFORM_DELAY, bulk_import.id) - expect(BulkImports::EntityWorker).to receive(:perform_async) - expect(BulkImports::ExportRequestWorker).to 
receive(:perform_async) + expect(BulkImports::EntityWorker).to receive(:perform_async).twice + expect(BulkImports::ExportRequestWorker).to receive(:perform_async).twice subject.perform(bulk_import.id) - expect(bulk_import.entities.map(&:status_name)).to contain_exactly(:created, :started) + expect(bulk_import.entities.map(&:status_name)).to contain_exactly(:started, :started) end context 'when there are project entities to process' do diff --git a/spec/workers/bulk_imports/entity_worker_spec.rb b/spec/workers/bulk_imports/entity_worker_spec.rb index ce45299c7f7..ab85b587975 100644 --- a/spec/workers/bulk_imports/entity_worker_spec.rb +++ b/spec/workers/bulk_imports/entity_worker_spec.rb @@ -36,9 +36,11 @@ RSpec.describe BulkImports::EntityWorker do expect(logger) .to receive(:info).twice .with( - worker: described_class.name, - entity_id: entity.id, - current_stage: nil + hash_including( + 'entity_id' => entity.id, + 'current_stage' => nil, + 'message' => 'Stage starting' + ) ) end @@ -58,24 +60,26 @@ RSpec.describe BulkImports::EntityWorker do expect(BulkImports::PipelineWorker) .to receive(:perform_async) - .and_raise(exception) + .and_raise(exception) expect_next_instance_of(Gitlab::Import::Logger) do |logger| expect(logger) .to receive(:info).twice .with( - worker: described_class.name, - entity_id: entity.id, - current_stage: nil + hash_including( + 'entity_id' => entity.id, + 'current_stage' => nil + ) ) expect(logger) .to receive(:error) .with( - worker: described_class.name, - entity_id: entity.id, - current_stage: nil, - error_message: 'Error!' + hash_including( + 'entity_id' => entity.id, + 'current_stage' => nil, + 'message' => 'Error!' 
+ ) ) end @@ -90,6 +94,18 @@ RSpec.describe BulkImports::EntityWorker do let(:job_args) { [entity.id, 0] } it 'do not enqueue a new pipeline job if the current stage still running' do + expect_next_instance_of(Gitlab::Import::Logger) do |logger| + expect(logger) + .to receive(:info).twice + .with( + hash_including( + 'entity_id' => entity.id, + 'current_stage' => 0, + 'message' => 'Stage running' + ) + ) + end + expect(BulkImports::PipelineWorker) .not_to receive(:perform_async) @@ -110,9 +126,10 @@ RSpec.describe BulkImports::EntityWorker do expect(logger) .to receive(:info).twice .with( - worker: described_class.name, - entity_id: entity.id, - current_stage: 0 + hash_including( + 'entity_id' => entity.id, + 'current_stage' => 0 + ) ) end diff --git a/spec/workers/bulk_imports/export_request_worker_spec.rb b/spec/workers/bulk_imports/export_request_worker_spec.rb index 4f452e3dd60..846df63a4d7 100644 --- a/spec/workers/bulk_imports/export_request_worker_spec.rb +++ b/spec/workers/bulk_imports/export_request_worker_spec.rb @@ -35,14 +35,16 @@ RSpec.describe BulkImports::ExportRequestWorker do expect(client).to receive(:post).and_raise(BulkImports::NetworkError, 'Export error').twice end - expect(Gitlab::Import::Logger).to receive(:warn).with( - bulk_import_entity_id: entity.id, - pipeline_class: 'ExportRequestWorker', - exception_class: 'BulkImports::NetworkError', - exception_message: 'Export error', - correlation_id_value: anything, - bulk_import_id: bulk_import.id, - bulk_import_entity_type: entity.source_type + expect(Gitlab::Import::Logger).to receive(:error).with( + hash_including( + 'bulk_import_entity_id' => entity.id, + 'pipeline_class' => 'ExportRequestWorker', + 'exception_class' => 'BulkImports::NetworkError', + 'exception_message' => 'Export error', + 'correlation_id_value' => anything, + 'bulk_import_id' => bulk_import.id, + 'bulk_import_entity_type' => entity.source_type + ) ).twice perform_multiple(job_args) diff --git 
a/spec/workers/bulk_imports/pipeline_worker_spec.rb b/spec/workers/bulk_imports/pipeline_worker_spec.rb index cb7e70a6749..3578fec5bc0 100644 --- a/spec/workers/bulk_imports/pipeline_worker_spec.rb +++ b/spec/workers/bulk_imports/pipeline_worker_spec.rb @@ -34,9 +34,10 @@ RSpec.describe BulkImports::PipelineWorker do expect(logger) .to receive(:info) .with( - worker: described_class.name, - pipeline_name: 'FakePipeline', - entity_id: entity.id + hash_including( + 'pipeline_name' => 'FakePipeline', + 'entity_id' => entity.id + ) ) end @@ -44,7 +45,7 @@ RSpec.describe BulkImports::PipelineWorker do .to receive(:perform_async) .with(entity.id, pipeline_tracker.stage) - expect(subject).to receive(:jid).and_return('jid') + allow(subject).to receive(:jid).and_return('jid') subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id) @@ -79,10 +80,11 @@ RSpec.describe BulkImports::PipelineWorker do expect(logger) .to receive(:error) .with( - worker: described_class.name, - pipeline_tracker_id: pipeline_tracker.id, - entity_id: entity.id, - message: 'Unstarted pipeline not found' + hash_including( + 'pipeline_tracker_id' => pipeline_tracker.id, + 'entity_id' => entity.id, + 'message' => 'Unstarted pipeline not found' + ) ) end @@ -107,10 +109,11 @@ RSpec.describe BulkImports::PipelineWorker do expect(logger) .to receive(:error) .with( - worker: described_class.name, - pipeline_name: 'InexistentPipeline', - entity_id: entity.id, - message: "'InexistentPipeline' is not a valid BulkImport Pipeline" + hash_including( + 'pipeline_name' => 'InexistentPipeline', + 'entity_id' => entity.id, + 'message' => "'InexistentPipeline' is not a valid BulkImport Pipeline" + ) ) end @@ -126,7 +129,7 @@ RSpec.describe BulkImports::PipelineWorker do .to receive(:perform_async) .with(entity.id, pipeline_tracker.stage) - expect(subject).to receive(:jid).and_return('jid') + allow(subject).to receive(:jid).and_return('jid') subject.perform(pipeline_tracker.id, pipeline_tracker.stage, 
entity.id) @@ -151,10 +154,11 @@ RSpec.describe BulkImports::PipelineWorker do expect(logger) .to receive(:error) .with( - worker: described_class.name, - pipeline_name: 'Pipeline', - entity_id: entity.id, - message: 'Failed entity status' + hash_including( + 'pipeline_name' => 'Pipeline', + 'entity_id' => entity.id, + 'message' => 'Failed entity status' + ) ) end @@ -183,7 +187,7 @@ RSpec.describe BulkImports::PipelineWorker do .and_raise(exception) end - expect(subject).to receive(:jid).and_return('jid').twice + allow(subject).to receive(:jid).and_return('jid') expect_any_instance_of(BulkImports::Tracker) do |tracker| expect(tracker).to receive(:retry).and_call_original @@ -193,9 +197,10 @@ RSpec.describe BulkImports::PipelineWorker do expect(logger) .to receive(:info) .with( - worker: described_class.name, - pipeline_name: 'FakePipeline', - entity_id: entity.id + hash_including( + 'pipeline_name' => 'FakePipeline', + 'entity_id' => entity.id + ) ) end @@ -292,10 +297,11 @@ RSpec.describe BulkImports::PipelineWorker do expect(logger) .to receive(:error) .with( - worker: described_class.name, - pipeline_name: 'NdjsonPipeline', - entity_id: entity.id, - message: 'Pipeline timeout' + hash_including( + 'pipeline_name' => 'NdjsonPipeline', + 'entity_id' => entity.id, + 'message' => 'Pipeline timeout' + ) ) end @@ -318,10 +324,11 @@ RSpec.describe BulkImports::PipelineWorker do expect(logger) .to receive(:error) .with( - worker: described_class.name, - pipeline_name: 'NdjsonPipeline', - entity_id: entity.id, - message: 'Error!' + hash_including( + 'pipeline_name' => 'NdjsonPipeline', + 'entity_id' => entity.id, + 'message' => 'Error!' 
+ ) ) end diff --git a/spec/workers/bulk_imports/stuck_import_worker_spec.rb b/spec/workers/bulk_imports/stuck_import_worker_spec.rb new file mode 100644 index 00000000000..7dfb6532c07 --- /dev/null +++ b/spec/workers/bulk_imports/stuck_import_worker_spec.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe BulkImports::StuckImportWorker do + let_it_be(:created_bulk_import) { create(:bulk_import, :created) } + let_it_be(:started_bulk_import) { create(:bulk_import, :started) } + let_it_be(:stale_created_bulk_import) { create(:bulk_import, :created, created_at: 3.days.ago) } + let_it_be(:stale_started_bulk_import) { create(:bulk_import, :started, created_at: 3.days.ago) } + let_it_be(:stale_created_bulk_import_entity) { create(:bulk_import_entity, :created, created_at: 3.days.ago) } + let_it_be(:stale_started_bulk_import_entity) { create(:bulk_import_entity, :started, created_at: 3.days.ago) } + let_it_be(:started_bulk_import_tracker) { create(:bulk_import_tracker, :started, entity: stale_started_bulk_import_entity) } + + subject { described_class.new.perform } + + describe 'perform' do + it 'updates the status of bulk imports to timeout' do + expect { subject }.to change { stale_created_bulk_import.reload.status_name }.from(:created).to(:timeout) + .and change { stale_started_bulk_import.reload.status_name }.from(:started).to(:timeout) + end + + it 'updates the status of bulk import entities to timeout' do + expect { subject }.to change { stale_created_bulk_import_entity.reload.status_name }.from(:created).to(:timeout) + .and change { stale_started_bulk_import_entity.reload.status_name }.from(:started).to(:timeout) + end + + it 'updates the status of stale entities trackers to timeout' do + expect { subject }.to change { started_bulk_import_tracker.reload.status_name }.from(:started).to(:timeout) + end + + it 'does not update the status of non-stale records' do + expect { subject }.to not_change { 
created_bulk_import.reload.status } + .and not_change { started_bulk_import.reload.status } + end + end +end diff --git a/spec/workers/ci/update_locked_unknown_artifacts_worker_spec.rb b/spec/workers/ci/update_locked_unknown_artifacts_worker_spec.rb new file mode 100644 index 00000000000..b42d135b1b6 --- /dev/null +++ b/spec/workers/ci/update_locked_unknown_artifacts_worker_spec.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe Ci::UpdateLockedUnknownArtifactsWorker do + let(:worker) { described_class.new } + + describe '#perform' do + it 'executes an instance of Ci::JobArtifacts::UpdateUnknownLockedStatusService' do + expect_next_instance_of(Ci::JobArtifacts::UpdateUnknownLockedStatusService) do |instance| + expect(instance).to receive(:execute).and_call_original + end + + expect(worker).to receive(:log_extra_metadata_on_done).with(:removed_count, 0) + expect(worker).to receive(:log_extra_metadata_on_done).with(:locked_count, 0) + + worker.perform + end + + context 'with the ci_job_artifacts_backlog_work flag shut off' do + before do + stub_feature_flags(ci_job_artifacts_backlog_work: false) + end + + it 'does not instantiate a new Ci::JobArtifacts::UpdateUnknownLockedStatusService' do + expect(Ci::JobArtifacts::UpdateUnknownLockedStatusService).not_to receive(:new) + + worker.perform + end + + it 'does not log any artifact counts' do + expect(worker).not_to receive(:log_extra_metadata_on_done) + + worker.perform + end + + it 'does not query the database' do + query_count = ActiveRecord::QueryRecorder.new { worker.perform }.count + + expect(query_count).to eq(0) + end + end + end +end diff --git a/spec/workers/concerns/application_worker_spec.rb b/spec/workers/concerns/application_worker_spec.rb index 95d9b982fc4..707fa0c9c78 100644 --- a/spec/workers/concerns/application_worker_spec.rb +++ b/spec/workers/concerns/application_worker_spec.rb @@ -49,7 +49,7 @@ RSpec.describe ApplicationWorker do worker.feature_category 
:pages expect(worker.sidekiq_options['queue']).to eq('queue_2') - worker.feature_category_not_owned! + worker.feature_category :not_owned expect(worker.sidekiq_options['queue']).to eq('queue_3') worker.urgency :high diff --git a/spec/workers/container_registry/migration/enqueuer_worker_spec.rb b/spec/workers/container_registry/migration/enqueuer_worker_spec.rb index 12c14c35365..81fa28dc603 100644 --- a/spec/workers/container_registry/migration/enqueuer_worker_spec.rb +++ b/spec/workers/container_registry/migration/enqueuer_worker_spec.rb @@ -2,8 +2,13 @@ require 'spec_helper' -RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures do +RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures, :clean_gitlab_redis_shared_state do + using RSpec::Parameterized::TableSyntax + include ExclusiveLeaseHelpers + let_it_be_with_reload(:container_repository) { create(:container_repository, created_at: 2.days.ago) } + let_it_be(:importing_repository) { create(:container_repository, :importing) } + let_it_be(:pre_importing_repository) { create(:container_repository, :pre_importing) } let(:worker) { described_class.new } @@ -24,14 +29,14 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures end end - shared_examples 're-enqueuing based on capacity' do + shared_examples 're-enqueuing based on capacity' do |capacity_limit: 4| context 'below capacity' do before do - allow(ContainerRegistry::Migration).to receive(:capacity).and_return(9999) + allow(ContainerRegistry::Migration).to receive(:capacity).and_return(capacity_limit) end it 're-enqueues the worker' do - expect(ContainerRegistry::Migration::EnqueuerWorker).to receive(:perform_async) + expect(described_class).to receive(:perform_async) subject end @@ -43,7 +48,7 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures end it 'does not re-enqueue the worker' do - expect(ContainerRegistry::Migration::EnqueuerWorker).not_to 
receive(:perform_async) + expect(described_class).not_to receive(:perform_async) subject end @@ -51,24 +56,46 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures end context 'with qualified repository' do - it 'starts the pre-import for the next qualified repository' do + before do method = worker.method(:next_repository) allow(worker).to receive(:next_repository) do next_qualified_repository = method.call allow(next_qualified_repository).to receive(:migration_pre_import).and_return(:ok) next_qualified_repository end + end - expect(worker).to receive(:log_extra_metadata_on_done) - .with(:container_repository_id, container_repository.id) - expect(worker).to receive(:log_extra_metadata_on_done) - .with(:import_type, 'next') + it 'starts the pre-import for the next qualified repository' do + expect_log_extra_metadata( + import_type: 'next', + container_repository_id: container_repository.id, + container_repository_path: container_repository.path, + container_repository_migration_state: 'pre_importing' + ) subject expect(container_repository.reload).to be_pre_importing end + context 'when the new pre-import maxes out the capacity' do + before do + # set capacity to 10 + stub_feature_flags( + container_registry_migration_phase2_capacity_25: false + ) + + # Plus 2 created above gives 9 importing repositories + create_list(:container_repository, 7, :importing) + end + + it 'does not re-enqueue the worker' do + expect(described_class).not_to receive(:perform_async) + + subject + end + end + it_behaves_like 're-enqueuing based on capacity' end @@ -77,7 +104,11 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures allow(ContainerRegistry::Migration).to receive(:enabled?).and_return(false) end - it_behaves_like 'no action' + it_behaves_like 'no action' do + before do + expect_log_extra_metadata(migration_enabled: false) + end + end end context 'above capacity' do @@ -87,7 +118,11 @@ RSpec.describe 
ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures allow(ContainerRegistry::Migration).to receive(:capacity).and_return(1) end - it_behaves_like 'no action' + it_behaves_like 'no action' do + before do + expect_log_extra_metadata(below_capacity: false, max_capacity_setting: 1) + end + end it 'does not re-enqueue the worker' do expect(ContainerRegistry::Migration::EnqueuerWorker).not_to receive(:perform_async) @@ -97,38 +132,91 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures end context 'too soon before previous completed import step' do - before do - create(:container_repository, :import_done, migration_import_done_at: 1.minute.ago) - allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(1.hour) + where(:state, :timestamp) do + :import_done | :migration_import_done_at + :pre_import_done | :migration_pre_import_done_at + :import_aborted | :migration_aborted_at + :import_skipped | :migration_skipped_at + end + + with_them do + before do + allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(45.minutes) + create(:container_repository, state, timestamp => 1.minute.ago) + end + + it_behaves_like 'no action' do + before do + expect_log_extra_metadata(waiting_time_passed: false, current_waiting_time_setting: 45.minutes) + end + end end - it_behaves_like 'no action' + context 'when last completed repository has nil timestamps' do + before do + allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(45.minutes) + create(:container_repository, migration_state: 'import_done') + end + + it 'continues to try the next import' do + expect { subject }.to change { container_repository.reload.migration_state } + end + end end context 'when an aborted import is available' do let_it_be(:aborted_repository) { create(:container_repository, :import_aborted) } - it 'retries the import for the aborted repository' do - method = 
worker.method(:next_aborted_repository) - allow(worker).to receive(:next_aborted_repository) do - next_aborted_repository = method.call - allow(next_aborted_repository).to receive(:migration_import).and_return(:ok) - allow(next_aborted_repository.gitlab_api_client).to receive(:import_status).and_return('import_failed') - next_aborted_repository + context 'with a successful registry request' do + before do + method = worker.method(:next_aborted_repository) + allow(worker).to receive(:next_aborted_repository) do + next_aborted_repository = method.call + allow(next_aborted_repository).to receive(:migration_import).and_return(:ok) + allow(next_aborted_repository.gitlab_api_client).to receive(:import_status).and_return('import_failed') + next_aborted_repository + end end - expect(worker).to receive(:log_extra_metadata_on_done) - .with(:container_repository_id, aborted_repository.id) - expect(worker).to receive(:log_extra_metadata_on_done) - .with(:import_type, 'retry') + it 'retries the import for the aborted repository' do + expect_log_extra_metadata( + import_type: 'retry', + container_repository_id: aborted_repository.id, + container_repository_path: aborted_repository.path, + container_repository_migration_state: 'importing' + ) - subject + subject - expect(aborted_repository.reload).to be_importing - expect(container_repository.reload).to be_default + expect(aborted_repository.reload).to be_importing + expect(container_repository.reload).to be_default + end + + it_behaves_like 're-enqueuing based on capacity' end - it_behaves_like 're-enqueuing based on capacity' + context 'when an error occurs' do + it 'does not abort that migration' do + method = worker.method(:next_aborted_repository) + allow(worker).to receive(:next_aborted_repository) do + next_aborted_repository = method.call + allow(next_aborted_repository).to receive(:retry_aborted_migration).and_raise(StandardError) + next_aborted_repository + end + + expect_log_extra_metadata( + import_type: 'retry', + 
container_repository_id: aborted_repository.id, + container_repository_path: aborted_repository.path, + container_repository_migration_state: 'import_aborted' + ) + + subject + + expect(aborted_repository.reload).to be_import_aborted + expect(container_repository.reload).to be_default + end + end end context 'when no repository qualifies' do @@ -147,6 +235,15 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures end it 'skips the repository' do + expect_log_extra_metadata( + import_type: 'next', + container_repository_id: container_repository.id, + container_repository_path: container_repository.path, + container_repository_migration_state: 'import_skipped', + tags_count_too_high: true, + max_tags_count_setting: 2 + ) + subject expect(container_repository.reload).to be_import_skipped @@ -154,7 +251,7 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures expect(container_repository.migration_skipped_at).not_to be_nil end - it_behaves_like 're-enqueuing based on capacity' + it_behaves_like 're-enqueuing based on capacity', capacity_limit: 3 end context 'when an error occurs' do @@ -163,10 +260,16 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures end it 'aborts the import' do + expect_log_extra_metadata( + import_type: 'next', + container_repository_id: container_repository.id, + container_repository_path: container_repository.path, + container_repository_migration_state: 'import_aborted' + ) + expect(Gitlab::ErrorTracking).to receive(:log_exception).with( instance_of(StandardError), - next_repository_id: container_repository.id, - next_aborted_repository_id: nil + next_repository_id: container_repository.id ) subject @@ -174,5 +277,26 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures expect(container_repository.reload).to be_import_aborted end end + + context 'with the exclusive lease taken' do + let(:lease_key) { worker.send(:lease_key) } + 
+ before do + stub_exclusive_lease_taken(lease_key, timeout: 30.minutes) + end + + it 'does not perform' do + expect(worker).not_to receive(:runnable?) + expect(worker).not_to receive(:re_enqueue_if_capacity) + + subject + end + end + + def expect_log_extra_metadata(metadata) + metadata.each do |key, value| + expect(worker).to receive(:log_extra_metadata_on_done).with(key, value) + end + end end end diff --git a/spec/workers/container_registry/migration/guard_worker_spec.rb b/spec/workers/container_registry/migration/guard_worker_spec.rb index 7d1df320d4e..299d1204af3 100644 --- a/spec/workers/container_registry/migration/guard_worker_spec.rb +++ b/spec/workers/container_registry/migration/guard_worker_spec.rb @@ -3,8 +3,6 @@ require 'spec_helper' RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do - include_context 'container registry client' - let(:worker) { described_class.new } describe '#perform' do @@ -13,11 +11,12 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do let(:importing_migrations) { ::ContainerRepository.with_migration_states(:importing) } let(:import_aborted_migrations) { ::ContainerRepository.with_migration_states(:import_aborted) } let(:import_done_migrations) { ::ContainerRepository.with_migration_states(:import_done) } + let(:import_skipped_migrations) { ::ContainerRepository.with_migration_states(:import_skipped) } subject { worker.perform } before do - stub_container_registry_config(enabled: true, api_url: registry_api_url, key: 'spec/fixtures/x509_certificate_pk.key') + stub_container_registry_config(enabled: true, api_url: 'http://container-registry', key: 'spec/fixtures/x509_certificate_pk.key') allow(::ContainerRegistry::Migration).to receive(:max_step_duration).and_return(5.minutes) end @@ -26,20 +25,57 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do allow(::Gitlab).to receive(:com?).and_return(true) end - shared_examples 'not aborting any 
migration' do - it 'will not abort the migration' do - expect(worker).to receive(:log_extra_metadata_on_done).with(:stale_migrations_count, 1) - expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_stale_migrations_count, 0) - expect(worker).to receive(:log_extra_metadata_on_done).with(:long_running_stale_migration_container_repository_ids, [stale_migration.id]) + shared_examples 'handling long running migrations' do + before do + allow_next_found_instance_of(ContainerRepository) do |repository| + allow(repository).to receive(:migration_cancel).and_return(migration_cancel_response) + end + end - expect { subject } - .to not_change(pre_importing_migrations, :count) - .and not_change(pre_import_done_migrations, :count) - .and not_change(importing_migrations, :count) - .and not_change(import_done_migrations, :count) - .and not_change(import_aborted_migrations, :count) - .and not_change { stale_migration.reload.migration_state } - .and not_change { ongoing_migration.migration_state } + context 'migration is canceled' do + let(:migration_cancel_response) { { status: :ok } } + + it 'will not abort the migration' do + expect(worker).to receive(:log_extra_metadata_on_done).with(:stale_migrations_count, 1) + expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_stale_migrations_count, 1) + expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_long_running_migration_ids, [stale_migration.id]) + + expect { subject } + .to change(import_skipped_migrations, :count) + + expect(stale_migration.reload.migration_state).to eq('import_skipped') + expect(stale_migration.reload.migration_skipped_reason).to eq('migration_canceled') + end + end + + context 'migration cancelation fails with an error' do + let(:migration_cancel_response) { { status: :error } } + + it 'will abort the migration' do + expect(worker).to receive(:log_extra_metadata_on_done).with(:stale_migrations_count, 1) + expect(worker).to 
receive(:log_extra_metadata_on_done).with(:aborted_stale_migrations_count, 1) + expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_long_running_migration_ids, [stale_migration.id]) + + expect { subject } + .to change(import_aborted_migrations, :count).by(1) + .and change { stale_migration.reload.migration_state }.to('import_aborted') + .and not_change { ongoing_migration.migration_state } + end + end + + context 'migration receives bad request with a new status' do + let(:migration_cancel_response) { { status: :bad_request, migration_state: :import_done } } + + it 'will abort the migration' do + expect(worker).to receive(:log_extra_metadata_on_done).with(:stale_migrations_count, 1) + expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_stale_migrations_count, 1) + expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_long_running_migration_ids, [stale_migration.id]) + + expect { subject } + .to change(import_aborted_migrations, :count).by(1) + .and change { stale_migration.reload.migration_state }.to('import_aborted') + .and not_change { ongoing_migration.migration_state } + end end end @@ -86,7 +122,7 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do context 'the client returns pre_import_in_progress' do let(:import_status) { 'pre_import_in_progress' } - it_behaves_like 'not aborting any migration' + it_behaves_like 'handling long running migrations' end end @@ -141,7 +177,7 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do context 'the client returns import_in_progress' do let(:import_status) { 'import_in_progress' } - it_behaves_like 'not aborting any migration' + it_behaves_like 'handling long running migrations' end end end diff --git a/spec/workers/database/batched_background_migration/ci_database_worker_spec.rb b/spec/workers/database/batched_background_migration/ci_database_worker_spec.rb index 2663c650986..f3cf5450048 100644 --- 
a/spec/workers/database/batched_background_migration/ci_database_worker_spec.rb +++ b/spec/workers/database/batched_background_migration/ci_database_worker_spec.rb @@ -3,5 +3,5 @@ require 'spec_helper' RSpec.describe Database::BatchedBackgroundMigration::CiDatabaseWorker, :clean_gitlab_redis_shared_state do - it_behaves_like 'it runs batched background migration jobs', 'ci' + it_behaves_like 'it runs batched background migration jobs', 'ci', feature_flag: :execute_batched_migrations_on_schedule_ci_database end diff --git a/spec/workers/database/batched_background_migration_worker_spec.rb b/spec/workers/database/batched_background_migration_worker_spec.rb index a6c7db60abe..7f0883def3c 100644 --- a/spec/workers/database/batched_background_migration_worker_spec.rb +++ b/spec/workers/database/batched_background_migration_worker_spec.rb @@ -3,5 +3,5 @@ require 'spec_helper' RSpec.describe Database::BatchedBackgroundMigrationWorker do - it_behaves_like 'it runs batched background migration jobs', :main + it_behaves_like 'it runs batched background migration jobs', :main, feature_flag: :execute_batched_migrations_on_schedule end diff --git a/spec/workers/database/ci_namespace_mirrors_consistency_check_worker_spec.rb b/spec/workers/database/ci_namespace_mirrors_consistency_check_worker_spec.rb new file mode 100644 index 00000000000..116026ea8f7 --- /dev/null +++ b/spec/workers/database/ci_namespace_mirrors_consistency_check_worker_spec.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe Database::CiNamespaceMirrorsConsistencyCheckWorker do + let(:worker) { described_class.new } + + describe '#perform' do + context 'feature flag is disabled' do + before do + stub_feature_flags(ci_namespace_mirrors_consistency_check: false) + end + + it 'does not perform the consistency check on namespaces' do + expect(Database::ConsistencyCheckService).not_to receive(:new) + expect(worker).not_to receive(:log_extra_metadata_on_done) + 
worker.perform + end + end + + context 'feature flag is enabled' do + before do + stub_feature_flags(ci_namespace_mirrors_consistency_check: true) + end + + it 'executes the consistency check on namespaces' do + expect(Database::ConsistencyCheckService).to receive(:new).and_call_original + expected_result = { batches: 0, matches: 0, mismatches: 0, mismatches_details: [] } + expect(worker).to receive(:log_extra_metadata_on_done).with(:results, expected_result) + worker.perform + end + end + + context 'logs should contain the detailed mismatches' do + let(:first_namespace) { Namespace.all.order(:id).limit(1).first } + let(:missing_namespace) { Namespace.all.order(:id).limit(2).last } + + before do + redis_shared_state_cleanup! + stub_feature_flags(ci_namespace_mirrors_consistency_check: true) + create_list(:namespace, 10) # This will also create Ci::NameSpaceMirror objects + missing_namespace.delete + + allow_next_instance_of(Database::ConsistencyCheckService) do |instance| + allow(instance).to receive(:random_start_id).and_return(Namespace.first.id) + end + end + + it 'reports the differences to the logs' do + expected_result = { + batches: 1, + matches: 9, + mismatches: 1, + mismatches_details: [{ + id: missing_namespace.id, + source_table: nil, + target_table: [missing_namespace.traversal_ids] + }], + start_id: first_namespace.id, + next_start_id: first_namespace.id # The batch size > number of namespaces + } + expect(worker).to receive(:log_extra_metadata_on_done).with(:results, expected_result) + worker.perform + end + end + end +end diff --git a/spec/workers/database/ci_project_mirrors_consistency_check_worker_spec.rb b/spec/workers/database/ci_project_mirrors_consistency_check_worker_spec.rb new file mode 100644 index 00000000000..b6bd825ffcd --- /dev/null +++ b/spec/workers/database/ci_project_mirrors_consistency_check_worker_spec.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe 
Database::CiProjectMirrorsConsistencyCheckWorker do + let(:worker) { described_class.new } + + describe '#perform' do + context 'feature flag is disabled' do + before do + stub_feature_flags(ci_project_mirrors_consistency_check: false) + end + + it 'does not perform the consistency check on projects' do + expect(Database::ConsistencyCheckService).not_to receive(:new) + expect(worker).not_to receive(:log_extra_metadata_on_done) + worker.perform + end + end + + context 'feature flag is enabled' do + before do + stub_feature_flags(ci_project_mirrors_consistency_check: true) + end + + it 'executes the consistency check on projects' do + expect(Database::ConsistencyCheckService).to receive(:new).and_call_original + expected_result = { batches: 0, matches: 0, mismatches: 0, mismatches_details: [] } + expect(worker).to receive(:log_extra_metadata_on_done).with(:results, expected_result) + worker.perform + end + end + + context 'logs should contain the detailed mismatches' do + let(:first_project) { Project.all.order(:id).limit(1).first } + let(:missing_project) { Project.all.order(:id).limit(2).last } + + before do + redis_shared_state_cleanup! 
+ stub_feature_flags(ci_project_mirrors_consistency_check: true) + create_list(:project, 10) # This will also create Ci::ProjectMirror objects + missing_project.delete + + allow_next_instance_of(Database::ConsistencyCheckService) do |instance| + allow(instance).to receive(:random_start_id).and_return(Project.first.id) + end + end + + it 'reports the differences to the logs' do + expected_result = { + batches: 1, + matches: 9, + mismatches: 1, + mismatches_details: [{ + id: missing_project.id, + source_table: nil, + target_table: [missing_project.namespace_id] + }], + start_id: first_project.id, + next_start_id: first_project.id # The batch size > number of projects + } + expect(worker).to receive(:log_extra_metadata_on_done).with(:results, expected_result) + worker.perform + end + end + end +end diff --git a/spec/workers/every_sidekiq_worker_spec.rb b/spec/workers/every_sidekiq_worker_spec.rb index 47205943f70..0351b500747 100644 --- a/spec/workers/every_sidekiq_worker_spec.rb +++ b/spec/workers/every_sidekiq_worker_spec.rb @@ -54,7 +54,7 @@ RSpec.describe 'Every Sidekiq worker' do # All Sidekiq worker classes should declare a valid `feature_category` # or explicitly be excluded with the `feature_category_not_owned!` annotation. # Please see doc/development/sidekiq_style_guide.md#feature-categorization for more details. - it 'has a feature_category or feature_category_not_owned! attribute', :aggregate_failures do + it 'has a feature_category attribute', :aggregate_failures do workers_without_defaults.each do |worker| expect(worker.get_feature_category).to be_a(Symbol), "expected #{worker.inspect} to declare a feature_category or feature_category_not_owned!" 
end diff --git a/spec/workers/gitlab/github_import/import_diff_note_worker_spec.rb b/spec/workers/gitlab/github_import/import_diff_note_worker_spec.rb index 34073d0ea39..af15f465107 100644 --- a/spec/workers/gitlab/github_import/import_diff_note_worker_spec.rb +++ b/spec/workers/gitlab/github_import/import_diff_note_worker_spec.rb @@ -7,7 +7,7 @@ RSpec.describe Gitlab::GithubImport::ImportDiffNoteWorker do describe '#import' do it 'imports a diff note' do - project = double(:project, full_path: 'foo/bar', id: 1) + project = double(:project, full_path: 'foo/bar', id: 1, import_state: nil) client = double(:client) importer = double(:importer) hash = { diff --git a/spec/workers/gitlab/github_import/import_issue_worker_spec.rb b/spec/workers/gitlab/github_import/import_issue_worker_spec.rb index dc0338eccad..29f21c1d184 100644 --- a/spec/workers/gitlab/github_import/import_issue_worker_spec.rb +++ b/spec/workers/gitlab/github_import/import_issue_worker_spec.rb @@ -7,7 +7,7 @@ RSpec.describe Gitlab::GithubImport::ImportIssueWorker do describe '#import' do it 'imports an issue' do - project = double(:project, full_path: 'foo/bar', id: 1) + project = double(:project, full_path: 'foo/bar', id: 1, import_state: nil) client = double(:client) importer = double(:importer) hash = { diff --git a/spec/workers/gitlab/github_import/import_note_worker_spec.rb b/spec/workers/gitlab/github_import/import_note_worker_spec.rb index bc254e6246d..f4598340938 100644 --- a/spec/workers/gitlab/github_import/import_note_worker_spec.rb +++ b/spec/workers/gitlab/github_import/import_note_worker_spec.rb @@ -7,7 +7,7 @@ RSpec.describe Gitlab::GithubImport::ImportNoteWorker do describe '#import' do it 'imports a note' do - project = double(:project, full_path: 'foo/bar', id: 1) + project = double(:project, full_path: 'foo/bar', id: 1, import_state: nil) client = double(:client) importer = double(:importer) hash = { diff --git a/spec/workers/gitlab/github_import/import_pull_request_worker_spec.rb 
b/spec/workers/gitlab/github_import/import_pull_request_worker_spec.rb index 6fe9741075f..faed2f8f340 100644 --- a/spec/workers/gitlab/github_import/import_pull_request_worker_spec.rb +++ b/spec/workers/gitlab/github_import/import_pull_request_worker_spec.rb @@ -7,7 +7,7 @@ RSpec.describe Gitlab::GithubImport::ImportPullRequestWorker do describe '#import' do it 'imports a pull request' do - project = double(:project, full_path: 'foo/bar', id: 1) + project = double(:project, full_path: 'foo/bar', id: 1, import_state: nil) client = double(:client) importer = double(:importer) hash = { diff --git a/spec/workers/merge_requests/update_head_pipeline_worker_spec.rb b/spec/workers/merge_requests/update_head_pipeline_worker_spec.rb index f3ea14ad539..5e0b07067df 100644 --- a/spec/workers/merge_requests/update_head_pipeline_worker_spec.rb +++ b/spec/workers/merge_requests/update_head_pipeline_worker_spec.rb @@ -11,11 +11,9 @@ RSpec.describe MergeRequests::UpdateHeadPipelineWorker do let(:pipeline) { create(:ci_pipeline, project: project, ref: ref) } let(:event) { Ci::PipelineCreatedEvent.new(data: { pipeline_id: pipeline.id }) } - subject { consume_event(event) } + subject { consume_event(subscriber: described_class, event: event) } - def consume_event(event) - described_class.new.perform(event.class.name, event.data) - end + it_behaves_like 'subscribes to event' context 'when merge requests already exist for this source branch', :sidekiq_inline do let(:merge_request_1) do diff --git a/spec/workers/namespaces/invite_team_email_worker_spec.rb b/spec/workers/namespaces/invite_team_email_worker_spec.rb deleted file mode 100644 index 47fdff9a8ef..00000000000 --- a/spec/workers/namespaces/invite_team_email_worker_spec.rb +++ /dev/null @@ -1,27 +0,0 @@ -# frozen_string_literal: true - -require 'spec_helper' - -RSpec.describe Namespaces::InviteTeamEmailWorker do - let_it_be(:user) { create(:user) } - let_it_be(:group) { create(:group) } - - it 'sends the email' do - 
expect(Namespaces::InviteTeamEmailService).to receive(:send_email).with(user, group).once - subject.perform(group.id, user.id) - end - - context 'when user id is non-existent' do - it 'does not send the email' do - expect(Namespaces::InviteTeamEmailService).not_to receive(:send_email) - subject.perform(group.id, non_existing_record_id) - end - end - - context 'when group id is non-existent' do - it 'does not send the email' do - expect(Namespaces::InviteTeamEmailService).not_to receive(:send_email) - subject.perform(non_existing_record_id, user.id) - end - end -end diff --git a/spec/workers/namespaces/root_statistics_worker_spec.rb b/spec/workers/namespaces/root_statistics_worker_spec.rb index a97a850bbcf..7b774da0bdc 100644 --- a/spec/workers/namespaces/root_statistics_worker_spec.rb +++ b/spec/workers/namespaces/root_statistics_worker_spec.rb @@ -10,7 +10,7 @@ RSpec.describe Namespaces::RootStatisticsWorker, '#perform' do context 'with a namespace' do it 'executes refresher service' do expect_any_instance_of(Namespaces::StatisticsRefresherService) - .to receive(:execute) + .to receive(:execute).and_call_original worker.perform(group.id) end diff --git a/spec/workers/namespaces/update_root_statistics_worker_spec.rb b/spec/workers/namespaces/update_root_statistics_worker_spec.rb index a525904b757..f2f633a39ca 100644 --- a/spec/workers/namespaces/update_root_statistics_worker_spec.rb +++ b/spec/workers/namespaces/update_root_statistics_worker_spec.rb @@ -9,11 +9,9 @@ RSpec.describe Namespaces::UpdateRootStatisticsWorker do Projects::ProjectDeletedEvent.new(data: { project_id: 1, namespace_id: namespace_id }) end - subject { consume_event(event) } + subject { consume_event(subscriber: described_class, event: event) } - def consume_event(event) - described_class.new.perform(event.class.name, event.data) - end + it_behaves_like 'subscribes to event' it 'enqueues ScheduleAggregationWorker' do expect(Namespaces::ScheduleAggregationWorker).to 
receive(:perform_async).with(namespace_id) diff --git a/spec/workers/packages/cleanup_package_file_worker_spec.rb b/spec/workers/packages/cleanup_package_file_worker_spec.rb index 33f89826312..380e8916d13 100644 --- a/spec/workers/packages/cleanup_package_file_worker_spec.rb +++ b/spec/workers/packages/cleanup_package_file_worker_spec.rb @@ -3,7 +3,7 @@ require 'spec_helper' RSpec.describe Packages::CleanupPackageFileWorker do - let_it_be(:package) { create(:package) } + let_it_be_with_reload(:package) { create(:package) } let(:worker) { described_class.new } @@ -23,24 +23,60 @@ RSpec.describe Packages::CleanupPackageFileWorker do expect(worker).to receive(:log_extra_metadata_on_done).twice expect { subject }.to change { Packages::PackageFile.count }.by(-1) - .and not_change { Packages::Package.count } + .and not_change { Packages::Package.count } + expect { package_file2.reload }.to raise_error(ActiveRecord::RecordNotFound) + end + + context 'with a duplicated PyPI package file' do + let_it_be_with_reload(:duplicated_package_file) { create(:package_file, package: package) } + + before do + package.update!(package_type: :pypi, version: '1.2.3') + duplicated_package_file.update_column(:file_name, package_file2.file_name) + end + + it 'deletes one of the duplicates' do + expect { subject }.to change { Packages::PackageFile.count }.by(-1) + .and not_change { Packages::Package.count } + expect { package_file2.reload }.to raise_error(ActiveRecord::RecordNotFound) + end end end - context 'with an error during the destroy' do + context 'with a package file to destroy' do let_it_be(:package_file) { create(:package_file, :pending_destruction) } - before do - expect(worker).to receive(:log_metadata).and_raise('Error!') + context 'with an error during the destroy' do + before do + allow(worker).to receive(:log_metadata).and_raise('Error!') + end + + it 'handles the error' do + expect { subject }.to change { Packages::PackageFile.error.count }.from(0).to(1) + 
expect(package_file.reload).to be_error + end end - it 'handles the error' do - expect { subject }.to change { Packages::PackageFile.error.count }.from(0).to(1) - expect(package_file.reload).to be_error + context 'when trying to destroy a destroyed record' do + before do + allow_next_found_instance_of(Packages::PackageFile) do |package_file| + destroy_method = package_file.method(:destroy!) + + allow(package_file).to receive(:destroy!) do + destroy_method.call + + raise 'Error!' + end + end + end + + it 'handles the error' do + expect { subject }.to change { Packages::PackageFile.count }.by(-1) + end end end - context 'removing the last package file' do + describe 'removing the last package file' do let_it_be(:package_file) { create(:package_file, :pending_destruction, package: package) } it 'deletes the package file and the package' do @@ -65,12 +101,12 @@ RSpec.describe Packages::CleanupPackageFileWorker do end describe '#remaining_work_count' do - before(:context) do - create_list(:package_file, 3, :pending_destruction, package: package) + before_all do + create_list(:package_file, 2, :pending_destruction, package: package) end subject { worker.remaining_work_count } - it { is_expected.to eq(3) } + it { is_expected.to eq(2) } end end diff --git a/spec/workers/project_export_worker_spec.rb b/spec/workers/project_export_worker_spec.rb index 9923d8bde7f..dd0a921059d 100644 --- a/spec/workers/project_export_worker_spec.rb +++ b/spec/workers/project_export_worker_spec.rb @@ -4,4 +4,30 @@ require 'spec_helper' RSpec.describe ProjectExportWorker do it_behaves_like 'export worker' + + context 'exporters duration measuring' do + let(:user) { create(:user) } + let(:project) { create(:project) } + let(:worker) { described_class.new } + + subject { worker.perform(user.id, project.id) } + + before do + project.add_owner(user) + end + + it 'logs exporters execution duration' do + expect(worker).to receive(:log_extra_metadata_on_done).with(:version_saver_duration_s, anything) 
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:avatar_saver_duration_s, anything) + expect(worker).to receive(:log_extra_metadata_on_done).with(:tree_saver_duration_s, anything) + expect(worker).to receive(:log_extra_metadata_on_done).with(:uploads_saver_duration_s, anything) + expect(worker).to receive(:log_extra_metadata_on_done).with(:repo_saver_duration_s, anything) + expect(worker).to receive(:log_extra_metadata_on_done).with(:wiki_repo_saver_duration_s, anything) + expect(worker).to receive(:log_extra_metadata_on_done).with(:lfs_saver_duration_s, anything) + expect(worker).to receive(:log_extra_metadata_on_done).with(:snippets_repo_saver_duration_s, anything) + expect(worker).to receive(:log_extra_metadata_on_done).with(:design_repo_saver_duration_s, anything) + + subject + end + end end diff --git a/spec/workers/projects/post_creation_worker_spec.rb b/spec/workers/projects/post_creation_worker_spec.rb index 06acf601666..3158ac9fa27 100644 --- a/spec/workers/projects/post_creation_worker_spec.rb +++ b/spec/workers/projects/post_creation_worker_spec.rb @@ -63,7 +63,7 @@ RSpec.describe Projects::PostCreationWorker do end it 'cleans invalid record and logs warning', :aggregate_failures do - invalid_integration_record = build(:prometheus_integration, properties: { api_url: nil, manual_configuration: true }.to_json) + invalid_integration_record = build(:prometheus_integration, properties: { api_url: nil, manual_configuration: true }) allow(::Integrations::Prometheus).to receive(:new).and_return(invalid_integration_record) expect(Gitlab::ErrorTracking).to receive(:track_exception).with(an_instance_of(ActiveRecord::RecordInvalid), include(extra: { project_id: a_kind_of(Integer) })).twice diff --git a/spec/workers/projects/record_target_platforms_worker_spec.rb b/spec/workers/projects/record_target_platforms_worker_spec.rb new file mode 100644 index 00000000000..eb53e3f8608 --- /dev/null +++ 
b/spec/workers/projects/record_target_platforms_worker_spec.rb @@ -0,0 +1,87 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe Projects::RecordTargetPlatformsWorker do + include ExclusiveLeaseHelpers + + let_it_be(:swift) { create(:programming_language, name: 'Swift') } + let_it_be(:objective_c) { create(:programming_language, name: 'Objective-C') } + let_it_be(:project) { create(:project, :repository, detected_repository_languages: true) } + + let(:worker) { described_class.new } + let(:service_result) { %w(ios osx watchos) } + let(:service_double) { instance_double(Projects::RecordTargetPlatformsService, execute: service_result) } + let(:lease_key) { "#{described_class.name.underscore}:#{project.id}" } + let(:lease_timeout) { described_class::LEASE_TIMEOUT } + + subject(:perform) { worker.perform(project.id) } + + before do + stub_exclusive_lease(lease_key, timeout: lease_timeout) + end + + shared_examples 'performs detection' do + it 'creates and executes a Projects::RecordTargetPlatformService instance for the project', :aggregate_failures do + expect(Projects::RecordTargetPlatformsService).to receive(:new).with(project) { service_double } + expect(service_double).to receive(:execute) + + perform + end + + it 'logs extra metadata on done', :aggregate_failures do + expect(Projects::RecordTargetPlatformsService).to receive(:new).with(project) { service_double } + expect(worker).to receive(:log_extra_metadata_on_done).with(:target_platforms, service_result) + + perform + end + end + + shared_examples 'does nothing' do + it 'does nothing' do + expect(Projects::RecordTargetPlatformsService).not_to receive(:new) + + perform + end + end + + context 'when project uses Swift programming language' do + let!(:repository_language) { create(:repository_language, project: project, programming_language: swift) } + + include_examples 'performs detection' + end + + context 'when project uses Objective-C programming language' do + 
let!(:repository_language) { create(:repository_language, project: project, programming_language: objective_c) } + + include_examples 'performs detection' + end + + context 'when the project does not contain programming languages for Apple platforms' do + it_behaves_like 'does nothing' + end + + context 'when project is not found' do + it 'does nothing' do + expect(Projects::RecordTargetPlatformsService).not_to receive(:new) + + worker.perform(non_existing_record_id) + end + end + + context 'when exclusive lease cannot be obtained' do + before do + stub_exclusive_lease_taken(lease_key) + end + + it_behaves_like 'does nothing' + end + + it 'has the `until_executed` deduplicate strategy' do + expect(described_class.get_deduplicate_strategy).to eq(:until_executed) + end + + it 'overrides #lease_release? to return false' do + expect(worker.send(:lease_release?)).to eq false + end +end diff --git a/spec/workers/quality/test_data_cleanup_worker_spec.rb b/spec/workers/quality/test_data_cleanup_worker_spec.rb deleted file mode 100644 index a17e6e0cb1a..00000000000 --- a/spec/workers/quality/test_data_cleanup_worker_spec.rb +++ /dev/null @@ -1,44 +0,0 @@ -# frozen_string_literal: true - -require 'spec_helper' - -RSpec.describe Quality::TestDataCleanupWorker do - subject { described_class.new } - - shared_examples 'successful deletion' do - before do - allow(Gitlab).to receive(:staging?).and_return(true) - end - - it 'removes test groups' do - expect { subject.perform }.to change(Group, :count).by(-test_group_count) - end - end - - describe "#perform" do - context 'with multiple test groups to remove' do - let(:test_group_count) { 5 } - let!(:groups_to_remove) { create_list(:group, test_group_count, :test_group) } - let!(:group_to_keep) { create(:group, path: 'test-group-fulfillment-keep', created_at: 1.day.ago) } - let!(:non_test_group) { create(:group) } - let(:non_test_owner_group) { create(:group, path: 'test-group-fulfillment1234', created_at: 4.days.ago) } - - before 
do - non_test_owner_group.add_owner(create(:user)) - end - - it_behaves_like 'successful deletion' - end - - context 'with paid groups' do - let(:test_group_count) { 1 } - let!(:paid_group) { create(:group, :test_group) } - - before do - allow(paid_group).to receive(:paid?).and_return(true) - end - - it_behaves_like 'successful deletion' - end - end -end |