author     GitLab Bot <gitlab-bot@gitlab.com>   2021-09-20 13:18:24 +0000
committer  GitLab Bot <gitlab-bot@gitlab.com>   2021-09-20 13:18:24 +0000
commit     0653e08efd039a5905f3fa4f6e9cef9f5d2f799c
tree       4dcc884cf6d81db44adae4aa99f8ec1233a41f55 /spec/workers
parent     744144d28e3e7fddc117924fef88de5d9674fe4c
download   gitlab-ce-0653e08efd039a5905f3fa4f6e9cef9f5d2f799c.tar.gz
Add latest changes from gitlab-org/gitlab@14-3-stable-ee (tag: v14.3.0-rc42)
Diffstat (limited to 'spec/workers')
24 files changed, 366 insertions, 577 deletions
diff --git a/spec/workers/authorized_project_update/user_refresh_from_replica_worker_spec.rb b/spec/workers/authorized_project_update/user_refresh_from_replica_worker_spec.rb
index 027ce3b7f89..0da58343773 100644
--- a/spec/workers/authorized_project_update/user_refresh_from_replica_worker_spec.rb
+++ b/spec/workers/authorized_project_update/user_refresh_from_replica_worker_spec.rb
@@ -51,20 +51,5 @@ RSpec.describe AuthorizedProjectUpdate::UserRefreshFromReplicaWorker do
         execute_worker
       end
     end
-
-    context 'when the feature flag `user_refresh_from_replica_worker_uses_replica_db` is disabled' do
-      before do
-        stub_feature_flags(user_refresh_from_replica_worker_uses_replica_db: false)
-      end
-
-      it 'calls Users::RefreshAuthorizedProjectsService' do
-        source = 'AuthorizedProjectUpdate::UserRefreshFromReplicaWorker'
-        expect_next_instance_of(Users::RefreshAuthorizedProjectsService, user, { source: source }) do |service|
-          expect(service).to receive(:execute)
-        end
-
-        execute_worker
-      end
-    end
   end
 end
diff --git a/spec/workers/background_migration_worker_spec.rb b/spec/workers/background_migration_worker_spec.rb
index 4575c270042..7892eb89e80 100644
--- a/spec/workers/background_migration_worker_spec.rb
+++ b/spec/workers/background_migration_worker_spec.rb
@@ -14,7 +14,17 @@ RSpec.describe BackgroundMigrationWorker, :clean_gitlab_redis_shared_state do
   describe '#perform' do
     before do
       allow(worker).to receive(:jid).and_return(1)
-      expect(worker).to receive(:always_perform?).and_return(false)
+      allow(worker).to receive(:always_perform?).and_return(false)
+    end
+
+    it 'can run scheduled job and retried job concurrently' do
+      expect(Gitlab::BackgroundMigration)
+        .to receive(:perform)
+        .with('Foo', [10, 20])
+        .exactly(2).time
+
+      worker.perform('Foo', [10, 20])
+      worker.perform('Foo', [10, 20], described_class::MAX_LEASE_ATTEMPTS - 1)
     end

     context 'when lease can be obtained' do
@@ -39,7 +49,7 @@ RSpec.describe BackgroundMigrationWorker, :clean_gitlab_redis_shared_state do
       before do
         expect(Gitlab::BackgroundMigration).not_to receive(:perform)

-        worker.lease_for('Foo').try_obtain
+        worker.lease_for('Foo', false).try_obtain
       end

       it 'reschedules the migration and decrements the lease_attempts' do
@@ -51,6 +61,10 @@ RSpec.describe BackgroundMigrationWorker, :clean_gitlab_redis_shared_state do
       end

       context 'when lease_attempts is 1' do
+        before do
+          worker.lease_for('Foo', true).try_obtain
+        end
+
         it 'reschedules the migration and decrements the lease_attempts' do
           expect(described_class)
             .to receive(:perform_in)
@@ -61,6 +75,10 @@ RSpec.describe BackgroundMigrationWorker, :clean_gitlab_redis_shared_state do
       end

       context 'when lease_attempts is 0' do
+        before do
+          worker.lease_for('Foo', true).try_obtain
+        end
+
         it 'gives up performing the migration' do
           expect(described_class).not_to receive(:perform_in)
           expect(Sidekiq.logger).to receive(:warn).with(
diff --git a/spec/workers/bulk_import_worker_spec.rb b/spec/workers/bulk_import_worker_spec.rb
index 205bf23f36d..b67c5c62f76 100644
--- a/spec/workers/bulk_import_worker_spec.rb
+++ b/spec/workers/bulk_import_worker_spec.rb
@@ -84,17 +84,20 @@ RSpec.describe BulkImportWorker do
         expect { subject.perform(bulk_import.id) }
           .to change(BulkImports::Tracker, :count)
-          .by(BulkImports::Stage.pipelines.size * 2)
+          .by(BulkImports::Groups::Stage.pipelines.size * 2)

         expect(entity_1.trackers).not_to be_empty
         expect(entity_2.trackers).not_to be_empty
       end

       context 'when there are created entities to process' do
-        it 'marks a batch of entities as started, enqueues EntityWorker, ExportRequestWorker and reenqueues' do
+        let_it_be(:bulk_import) { create(:bulk_import, :created) }
+
+        before do
           stub_const("#{described_class}::DEFAULT_BATCH_SIZE", 1)
+        end

-          bulk_import = create(:bulk_import, :created)
+        it 'marks a batch of entities as started, enqueues EntityWorker, ExportRequestWorker and reenqueues' do
           create(:bulk_import_entity, :created, bulk_import: bulk_import)
           create(:bulk_import_entity, :created, bulk_import: bulk_import)
@@ -106,6 +109,16 @@ RSpec.describe BulkImportWorker do

           expect(bulk_import.entities.map(&:status_name)).to contain_exactly(:created, :started)
         end
+
+        context 'when there are project entities to process' do
+          it 'does not enqueue ExportRequestWorker' do
+            create(:bulk_import_entity, :created, :project_entity, bulk_import: bulk_import)
+
+            expect(BulkImports::ExportRequestWorker).not_to receive(:perform_async)
+
+            subject.perform(bulk_import.id)
+          end
+        end
       end

       context 'when exception occurs' do
diff --git a/spec/workers/bulk_imports/pipeline_worker_spec.rb b/spec/workers/bulk_imports/pipeline_worker_spec.rb
index 972a4158194..56f28654ac5 100644
--- a/spec/workers/bulk_imports/pipeline_worker_spec.rb
+++ b/spec/workers/bulk_imports/pipeline_worker_spec.rb
@@ -21,6 +21,10 @@ RSpec.describe BulkImports::PipelineWorker do
   before do
     stub_const('FakePipeline', pipeline_class)
+
+    allow(BulkImports::Groups::Stage)
+      .to receive(:pipelines)
+      .and_return([[0, pipeline_class]])
   end

   it 'runs the given pipeline successfully' do
@@ -30,12 +34,6 @@ RSpec.describe BulkImports::PipelineWorker do
       pipeline_name: 'FakePipeline'
     )

-    expect(BulkImports::Stage)
-      .to receive(:pipeline_exists?)
-      .with('FakePipeline')
-      .twice
-      .and_return(true)
-
     expect_next_instance_of(Gitlab::Import::Logger) do |logger|
       expect(logger)
         .to receive(:info)
@@ -110,7 +108,7 @@ RSpec.describe BulkImports::PipelineWorker do
       expect(Gitlab::ErrorTracking)
         .to receive(:track_exception)
         .with(
-          instance_of(NameError),
+          instance_of(BulkImports::Error),
           entity_id: entity.id,
           pipeline_name: pipeline_tracker.pipeline_name
         )
@@ -157,10 +155,10 @@ RSpec.describe BulkImports::PipelineWorker do
     before do
       stub_const('NdjsonPipeline', ndjson_pipeline)
-      allow(BulkImports::Stage)
-        .to receive(:pipeline_exists?)
-        .with('NdjsonPipeline')
-        .and_return(true)
+
+      allow(BulkImports::Groups::Stage)
+        .to receive(:pipelines)
+        .and_return([[0, ndjson_pipeline]])
     end

     it 'runs the pipeline successfully' do
diff --git a/spec/workers/ci/external_pull_requests/create_pipeline_worker_spec.rb b/spec/workers/ci/external_pull_requests/create_pipeline_worker_spec.rb
new file mode 100644
index 00000000000..116a0e4d035
--- /dev/null
+++ b/spec/workers/ci/external_pull_requests/create_pipeline_worker_spec.rb
@@ -0,0 +1,69 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Ci::ExternalPullRequests::CreatePipelineWorker do
+  let_it_be(:project) { create(:project, :auto_devops, :repository) }
+  let_it_be(:user) { project.owner }
+  let_it_be(:external_pull_request) do
+    branch = project.repository.branches.last
+    create(:external_pull_request, project: project, source_branch: branch.name, source_sha: branch.target)
+  end
+
+  let(:worker) { described_class.new }
+
+  describe '#perform' do
+    let(:project_id) { project.id }
+    let(:user_id) { user.id }
+    let(:external_pull_request_id) { external_pull_request.id }
+
+    subject(:perform) { worker.perform(project_id, user_id, external_pull_request_id) }
+
+    it 'creates the pipeline' do
+      pipeline = perform.payload
+
+      expect(pipeline).to be_valid
+      expect(pipeline).to be_persisted
+      expect(pipeline).to be_external_pull_request_event
+      expect(pipeline.project).to eq(project)
+      expect(pipeline.user).to eq(user)
+      expect(pipeline.external_pull_request).to eq(external_pull_request)
+      expect(pipeline.status).to eq('created')
+      expect(pipeline.ref).to eq(external_pull_request.source_branch)
+      expect(pipeline.sha).to eq(external_pull_request.source_sha)
+      expect(pipeline.source_sha).to eq(external_pull_request.source_sha)
+      expect(pipeline.target_sha).to eq(external_pull_request.target_sha)
+    end
+
+    shared_examples_for 'not calling service' do
+      it 'does not call the service' do
+        expect(Ci::CreatePipelineService).not_to receive(:new)
+
+        perform
+      end
+    end
+
+    context 'when the project not found' do
+      let(:project_id) { non_existing_record_id }
+
+      it_behaves_like 'not calling service'
+    end
+
+    context 'when the user not found' do
+      let(:user_id) { non_existing_record_id }
+
+      it_behaves_like 'not calling service'
+    end
+
+    context 'when the pull request not found' do
+      let(:external_pull_request_id) { non_existing_record_id }
+
+      it_behaves_like 'not calling service'
+    end
+
+    context 'when the pull request does not belong to the project' do
+      let(:external_pull_request_id) { create(:external_pull_request).id }
+
+      it_behaves_like 'not calling service'
+    end
+  end
+end
diff --git a/spec/workers/concerns/worker_attributes_spec.rb b/spec/workers/concerns/worker_attributes_spec.rb
index d4b17c65f46..ad9d5eeccbe 100644
--- a/spec/workers/concerns/worker_attributes_spec.rb
+++ b/spec/workers/concerns/worker_attributes_spec.rb
@@ -35,45 +35,17 @@ RSpec.describe WorkerAttributes do
       end
     end

-    context 'when job is idempotent' do
-      context 'when data_consistency is not :always' do
-        it 'raise exception' do
-          worker.idempotent!
-
-          expect { worker.data_consistency(:sticky) }
-            .to raise_error("Class can't be marked as idempotent if data_consistency is not set to :always")
-        end
-      end
-
-      context 'when feature_flag is provided' do
-        before do
-          stub_feature_flags(test_feature_flag: false)
-          skip_feature_flags_yaml_validation
-          skip_default_enabled_yaml_check
-        end
-
-        it 'returns correct feature flag value' do
-          worker.data_consistency(:sticky, feature_flag: :test_feature_flag)
-
-          expect(worker.get_data_consistency_feature_flag_enabled?).not_to be_truthy
-        end
+    context 'when feature_flag is provided' do
+      before do
+        stub_feature_flags(test_feature_flag: false)
+        skip_feature_flags_yaml_validation
+        skip_default_enabled_yaml_check
       end
-    end
-  end
-
-  describe '.idempotent!' do
-    it 'sets `idempotent` attribute of the worker class to true' do
-      worker.idempotent!
-      expect(worker.send(:class_attributes)[:idempotent]).to eq(true)
-    end
-
-    context 'when data consistency is not :always' do
-      it 'raise exception' do
-        worker.data_consistency(:sticky)
+      it 'returns correct feature flag value' do
+        worker.data_consistency(:sticky, feature_flag: :test_feature_flag)

-        expect { worker.idempotent! }
-          .to raise_error("Class can't be marked as idempotent if data_consistency is not set to :always")
+        expect(worker.get_data_consistency_feature_flag_enabled?).not_to be_truthy
       end
     end
   end
diff --git a/spec/workers/database/partition_management_worker_spec.rb b/spec/workers/database/partition_management_worker_spec.rb
index 01b7f209b2d..9ded36743a8 100644
--- a/spec/workers/database/partition_management_worker_spec.rb
+++ b/spec/workers/database/partition_management_worker_spec.rb
@@ -6,16 +6,14 @@ RSpec.describe Database::PartitionManagementWorker do
   describe '#perform' do
     subject { described_class.new.perform }

-    let(:manager) { instance_double('PartitionManager', sync_partitions: nil) }
     let(:monitoring) { instance_double('PartitionMonitoring', report_metrics: nil) }

     before do
-      allow(Gitlab::Database::Partitioning::PartitionManager).to receive(:new).and_return(manager)
       allow(Gitlab::Database::Partitioning::PartitionMonitoring).to receive(:new).and_return(monitoring)
     end

-    it 'delegates to PartitionManager' do
-      expect(manager).to receive(:sync_partitions)
+    it 'delegates to Partitioning' do
+      expect(Gitlab::Database::Partitioning).to receive(:sync_partitions)

       subject
     end
diff --git a/spec/workers/deployments/finished_worker_spec.rb b/spec/workers/deployments/finished_worker_spec.rb
deleted file mode 100644
index d0a26ae1547..00000000000
--- a/spec/workers/deployments/finished_worker_spec.rb
+++ /dev/null
@@ -1,65 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe Deployments::FinishedWorker do
-  let(:worker) { described_class.new }
-
-  describe '#perform' do
-    before do
-      allow(ProjectServiceWorker).to receive(:perform_async)
-    end
-
-    it 'links merge requests to the deployment' do
-      deployment = create(:deployment)
-      service = instance_double(Deployments::LinkMergeRequestsService)
-
-      expect(Deployments::LinkMergeRequestsService)
-        .to receive(:new)
-        .with(deployment)
-        .and_return(service)
-
-      expect(service).to receive(:execute)
-
-      worker.perform(deployment.id)
-    end
-
-    it 'executes project services for deployment_hooks' do
-      deployment = create(:deployment)
-      project = deployment.project
-      service = create(:service, type: 'SlackService', project: project, deployment_events: true, active: true)
-
-      worker.perform(deployment.id)
-
-      expect(ProjectServiceWorker).to have_received(:perform_async).with(service.id, an_instance_of(Hash))
-    end
-
-    it 'does not execute an inactive service' do
-      deployment = create(:deployment)
-      project = deployment.project
-      create(:service, type: 'SlackService', project: project, deployment_events: true, active: false)
-
-      worker.perform(deployment.id)
-
-      expect(ProjectServiceWorker).not_to have_received(:perform_async)
-    end
-
-    it 'does nothing if a deployment with the given id does not exist' do
-      worker.perform(0)
-
-      expect(ProjectServiceWorker).not_to have_received(:perform_async)
-    end
-
-    it 'execute webhooks' do
-      deployment = create(:deployment)
-      project = deployment.project
-      web_hook = create(:project_hook, deployment_events: true, project: project)
-
-      expect_next_instance_of(WebHookService, web_hook, an_instance_of(Hash), "deployment_hooks") do |service|
-        expect(service).to receive(:async_execute)
-      end
-
-      worker.perform(deployment.id)
-    end
-  end
-end
diff --git a/spec/workers/deployments/hooks_worker_spec.rb b/spec/workers/deployments/hooks_worker_spec.rb
index 5d8edf85dd9..b4a91cff2ac 100644
--- a/spec/workers/deployments/hooks_worker_spec.rb
+++ b/spec/workers/deployments/hooks_worker_spec.rb
@@ -52,7 +52,6 @@ RSpec.describe Deployments::HooksWorker do

     it_behaves_like 'worker with data consistency',
                     described_class,
-                    feature_flag: :load_balancing_for_deployments_hooks_worker,
                     data_consistency: :delayed
   end
 end
diff --git a/spec/workers/deployments/success_worker_spec.rb b/spec/workers/deployments/success_worker_spec.rb
deleted file mode 100644
index d9996e66919..00000000000
--- a/spec/workers/deployments/success_worker_spec.rb
+++ /dev/null
@@ -1,38 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe Deployments::SuccessWorker do
-  subject { described_class.new.perform(deployment&.id) }
-
-  context 'when successful deployment' do
-    let(:deployment) { create(:deployment, :success) }
-
-    it 'executes Deployments::UpdateEnvironmentService' do
-      expect(Deployments::UpdateEnvironmentService)
-        .to receive(:new).with(deployment).and_call_original
-
-      subject
-    end
-  end
-
-  context 'when canceled deployment' do
-    let(:deployment) { create(:deployment, :canceled) }
-
-    it 'does not execute Deployments::UpdateEnvironmentService' do
-      expect(Deployments::UpdateEnvironmentService).not_to receive(:new)
-
-      subject
-    end
-  end
-
-  context 'when deploy record does not exist' do
-    let(:deployment) { nil }
-
-    it 'does not execute Deployments::UpdateEnvironmentService' do
-      expect(Deployments::UpdateEnvironmentService).not_to receive(:new)
-
-      subject
-    end
-  end
-end
diff --git a/spec/workers/environments/auto_stop_worker_spec.rb b/spec/workers/environments/auto_stop_worker_spec.rb
new file mode 100644
index 00000000000..1983cfa18ea
--- /dev/null
+++ b/spec/workers/environments/auto_stop_worker_spec.rb
@@ -0,0 +1,71 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Environments::AutoStopWorker do
+  include CreateEnvironmentsHelpers
+
+  subject { worker.perform(environment_id) }
+
+  let_it_be(:project) { create(:project, :repository) }
+  let_it_be(:developer) { create(:user).tap { |u| project.add_developer(u) } }
+  let_it_be(:reporter) { create(:user).tap { |u| project.add_reporter(u) } }
+
+  before_all do
+    project.repository.add_branch(developer, 'review/feature', 'master')
+  end
+
+  let!(:environment) { create_review_app(user, project, 'review/feature').environment }
+  let(:environment_id) { environment.id }
+  let(:worker) { described_class.new }
+  let(:user) { developer }
+
+  it 'stops the environment' do
+    expect { subject }
+      .to change { Environment.find_by_name('review/feature').state }
+      .from('available').to('stopped')
+  end
+
+  it 'executes the stop action' do
+    expect { subject }
+      .to change { Ci::Build.find_by_name('stop_review_app').status }
+      .from('manual').to('pending')
+  end
+
+  context 'when user does not have a permission to play the stop action' do
+    let(:user) { reporter }
+
+    it 'raises an error' do
+      expect { subject }.to raise_error(Gitlab::Access::AccessDeniedError)
+    end
+  end
+
+  context 'when the environment has already been stopped' do
+    before do
+      environment.stop!
+    end
+
+    it 'does not execute the stop action' do
+      expect { subject }
+        .not_to change { Ci::Build.find_by_name('stop_review_app').status }
+    end
+  end
+
+  context 'when there are no deployments and associted stop actions' do
+    let!(:environment) { create(:environment) }
+
+    it 'stops the environment' do
+      subject
+
+      expect(environment.reload).to be_stopped
+    end
+  end
+
+  context 'when there are no corresponding environment record' do
+    let!(:environment) { double(:environment, id: non_existing_record_id) }
+
+    it 'ignores the invalid record' do
+      expect { subject }.not_to raise_error
+    end
+  end
+end
diff --git a/spec/workers/every_sidekiq_worker_spec.rb b/spec/workers/every_sidekiq_worker_spec.rb
index ea1f0153f83..235a1f6e3dd 100644
--- a/spec/workers/every_sidekiq_worker_spec.rb
+++ b/spec/workers/every_sidekiq_worker_spec.rb
@@ -436,6 +436,7 @@ RSpec.describe 'Every Sidekiq worker' do
         'TodosDestroyer::ConfidentialEpicWorker' => 3,
         'TodosDestroyer::ConfidentialIssueWorker' => 3,
         'TodosDestroyer::DestroyedIssuableWorker' => 3,
+        'TodosDestroyer::DestroyedDesignsWorker' => 3,
         'TodosDestroyer::EntityLeaveWorker' => 3,
         'TodosDestroyer::GroupPrivateWorker' => 3,
         'TodosDestroyer::PrivateFeaturesWorker' => 3,
@@ -452,6 +453,7 @@ RSpec.describe 'Every Sidekiq worker' do
         'WaitForClusterCreationWorker' => 3,
         'WebHookWorker' => 4,
         'WebHooks::DestroyWorker' => 3,
+        'WebHooks::LogExecutionWorker' => 3,
         'Wikis::GitGarbageCollectWorker' => false,
         'X509CertificateRevokeWorker' => 3
       }
diff --git a/spec/workers/expire_job_cache_worker_spec.rb b/spec/workers/expire_job_cache_worker_spec.rb
index cbd9dd39336..6b14ccea105 100644
--- a/spec/workers/expire_job_cache_worker_spec.rb
+++ b/spec/workers/expire_job_cache_worker_spec.rb
@@ -13,44 +13,9 @@ RSpec.describe ExpireJobCacheWorker do
     let(:job_args) { job.id }

-    include_examples 'an idempotent worker' do
-      it 'invalidates Etag caching for the job path' do
-        job_path = "/#{project.full_path}/builds/#{job.id}.json"
-
-        spy_store = Gitlab::EtagCaching::Store.new
-
-        allow(Gitlab::EtagCaching::Store).to receive(:new) { spy_store }
-
-        expect(spy_store).to receive(:touch)
-          .exactly(worker_exec_times).times
-          .with(job_path)
-          .and_call_original
-
-        expect(ExpirePipelineCacheWorker).to receive(:perform_async)
-          .with(pipeline.id)
-          .exactly(worker_exec_times).times
-
-        subject
-      end
-    end
-
-    it 'does not perform extra queries', :aggregate_failures do
-      worker = described_class.new
-      recorder = ActiveRecord::QueryRecorder.new { worker.perform(job.id) }
-
-      occurences = recorder.data.values.flat_map {|v| v[:occurrences]}
-      project_queries = occurences.select {|s| s.include?('FROM "projects"')}
-      namespace_queries = occurences.select {|s| s.include?('FROM "namespaces"')}
-      route_queries = occurences.select {|s| s.include?('FROM "routes"')}
-
-      # This worker is run 1 million times an hour, so we need to save as much
-      # queries as possible.
-      expect(recorder.count).to be <= 1
-
-      expect(project_queries.size).to eq(0)
-      expect(namespace_queries.size).to eq(0)
-      expect(route_queries.size).to eq(0)
-    end
+    it_behaves_like 'worker with data consistency',
+                    described_class,
+                    data_consistency: :delayed
   end

   context 'when there is no job in the pipeline' do
diff --git a/spec/workers/expire_pipeline_cache_worker_spec.rb b/spec/workers/expire_pipeline_cache_worker_spec.rb
index 8c24aaa985b..f4c4df2e752 100644
--- a/spec/workers/expire_pipeline_cache_worker_spec.rb
+++ b/spec/workers/expire_pipeline_cache_worker_spec.rb
@@ -18,23 +18,6 @@ RSpec.describe ExpirePipelineCacheWorker do
       subject.perform(pipeline.id)
     end

-    it 'does not perform extra queries', :aggregate_failures do
-      recorder = ActiveRecord::QueryRecorder.new { subject.perform(pipeline.id) }
-
-      project_queries = recorder.data.values.flat_map {|v| v[:occurrences]}.select {|s| s.include?('FROM "projects"')}
-      namespace_queries = recorder.data.values.flat_map {|v| v[:occurrences]}.select {|s| s.include?('FROM "namespaces"')}
-      route_queries = recorder.data.values.flat_map {|v| v[:occurrences]}.select {|s| s.include?('FROM "routes"')}
-
-      # This worker is run 1 million times an hour, so we need to save as much
-      # queries as possible.
-      expect(recorder.count).to be <= 6
-
-      # These arises from #update_etag_cache
-      expect(project_queries.size).to eq(1)
-      expect(namespace_queries.size).to eq(1)
-      expect(route_queries.size).to eq(1)
-    end
-
     it "doesn't do anything if the pipeline not exist" do
       expect_any_instance_of(Ci::ExpirePipelineCacheService).not_to receive(:execute)
       expect_any_instance_of(Gitlab::EtagCaching::Store).not_to receive(:touch)
diff --git a/spec/workers/gitlab/github_import/stage/import_issues_and_diff_notes_worker_spec.rb b/spec/workers/gitlab/github_import/stage/import_issues_and_diff_notes_worker_spec.rb
index f2a28ec40b8..c0dd4f488cc 100644
--- a/spec/workers/gitlab/github_import/stage/import_issues_and_diff_notes_worker_spec.rb
+++ b/spec/workers/gitlab/github_import/stage/import_issues_and_diff_notes_worker_spec.rb
@@ -10,7 +10,7 @@ RSpec.describe Gitlab::GithubImport::Stage::ImportIssuesAndDiffNotesWorker do
     it 'imports the issues and diff notes' do
       client = double(:client)

-      described_class::IMPORTERS.each do |klass|
+      worker.importers(project).each do |klass|
         importer = double(:importer)
         waiter = Gitlab::JobWaiter.new(2, '123')
@@ -31,4 +31,45 @@ RSpec.describe Gitlab::GithubImport::Stage::ImportIssuesAndDiffNotesWorker do
       worker.import(client, project)
     end
   end
+
+  describe '#importers' do
+    context 'when project group is present' do
+      let_it_be(:project) { create(:project) }
+      let_it_be(:group) { create(:group, projects: [project]) }
+
+      context 'when feature flag github_importer_single_endpoint_notes_import is enabled' do
+        it 'includes single endpoint diff notes importer' do
+          project = create(:project)
+          group = create(:group, projects: [project])
+
+          stub_feature_flags(github_importer_single_endpoint_notes_import: group)
+
+          expect(worker.importers(project)).to contain_exactly(
+            Gitlab::GithubImport::Importer::IssuesImporter,
+            Gitlab::GithubImport::Importer::SingleEndpointDiffNotesImporter
+          )
+        end
+      end
+
+      context 'when feature flag github_importer_single_endpoint_notes_import is disabled' do
+        it 'includes default diff notes importer' do
+          stub_feature_flags(github_importer_single_endpoint_notes_import: false)
+
+          expect(worker.importers(project)).to contain_exactly(
+            Gitlab::GithubImport::Importer::IssuesImporter,
+            Gitlab::GithubImport::Importer::DiffNotesImporter
+          )
+        end
+      end
+    end
+
+    context 'when project group is missing' do
+      it 'includes default diff notes importer' do
+        expect(worker.importers(project)).to contain_exactly(
+          Gitlab::GithubImport::Importer::IssuesImporter,
+          Gitlab::GithubImport::Importer::DiffNotesImporter
+        )
+      end
+    end
+  end
 end
diff --git a/spec/workers/gitlab/github_import/stage/import_notes_worker_spec.rb b/spec/workers/gitlab/github_import/stage/import_notes_worker_spec.rb
index 73b19239f4a..f9f21e4dfa2 100644
--- a/spec/workers/gitlab/github_import/stage/import_notes_worker_spec.rb
+++ b/spec/workers/gitlab/github_import/stage/import_notes_worker_spec.rb
@@ -8,18 +8,21 @@ RSpec.describe Gitlab::GithubImport::Stage::ImportNotesWorker do
   describe '#import' do
     it 'imports all the notes' do
-      importer = double(:importer)
       client = double(:client)
-      waiter = Gitlab::JobWaiter.new(2, '123')

-      expect(Gitlab::GithubImport::Importer::NotesImporter)
-        .to receive(:new)
-        .with(project, client)
-        .and_return(importer)
+      worker.importers(project).each do |klass|
+        importer = double(:importer)
+        waiter = Gitlab::JobWaiter.new(2, '123')

-      expect(importer)
-        .to receive(:execute)
-        .and_return(waiter)
+        expect(klass)
+          .to receive(:new)
+          .with(project, client)
+          .and_return(importer)
+
+        expect(importer)
+          .to receive(:execute)
+          .and_return(waiter)
+      end

       expect(Gitlab::GithubImport::AdvanceStageWorker)
         .to receive(:perform_async)
@@ -28,4 +31,43 @@ RSpec.describe Gitlab::GithubImport::Stage::ImportNotesWorker do
       worker.import(client, project)
     end
   end
+
+  describe '#importers' do
+    context 'when project group is present' do
+      let_it_be(:project) { create(:project) }
+      let_it_be(:group) { create(:group, projects: [project]) }
+
+      context 'when feature flag github_importer_single_endpoint_notes_import is enabled' do
+        it 'includes single endpoint mr and issue notes importers' do
+          project = create(:project)
+          group = create(:group, projects: [project])
+
+          stub_feature_flags(github_importer_single_endpoint_notes_import: group)
+
+          expect(worker.importers(project)).to contain_exactly(
+            Gitlab::GithubImport::Importer::SingleEndpointMergeRequestNotesImporter,
+            Gitlab::GithubImport::Importer::SingleEndpointIssueNotesImporter
+          )
+        end
+      end
+
+      context 'when feature flag github_importer_single_endpoint_notes_import is disabled' do
+        it 'includes default notes importer' do
+          stub_feature_flags(github_importer_single_endpoint_notes_import: false)
+
+          expect(worker.importers(project)).to contain_exactly(
+            Gitlab::GithubImport::Importer::NotesImporter
+          )
+        end
+      end
+    end
+
+    context 'when project group is missing' do
+      it 'includes default diff notes importer' do
+        expect(worker.importers(project)).to contain_exactly(
+          Gitlab::GithubImport::Importer::NotesImporter
+        )
+      end
+    end
+  end
 end
diff --git a/spec/workers/issue_rebalancing_worker_spec.rb b/spec/workers/issue_rebalancing_worker_spec.rb
index b6e9429d78e..cba42a1577e 100644
--- a/spec/workers/issue_rebalancing_worker_spec.rb
+++ b/spec/workers/issue_rebalancing_worker_spec.rb
@@ -8,41 +8,29 @@ RSpec.describe IssueRebalancingWorker do
     let_it_be(:project) { create(:project, group: group) }
     let_it_be(:issue) { create(:issue, project: project) }

-    context 'when block_issue_repositioning is enabled' do
-      before do
-        stub_feature_flags(block_issue_repositioning: group)
-      end
-
-      it 'does not run an instance of IssueRebalancingService' do
-        expect(IssueRebalancingService).not_to receive(:new)
-
-        described_class.new.perform(nil, issue.project_id)
-      end
-    end
-
     shared_examples 'running the worker' do
-      it 'runs an instance of IssueRebalancingService' do
+      it 'runs an instance of Issues::RelativePositionRebalancingService' do
         service = double(execute: nil)
         service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)

-        expect(IssueRebalancingService).to receive(:new).with(service_param).and_return(service)
+        expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)

         described_class.new.perform(*arguments)
       end

-      it 'anticipates there being too many issues' do
+      it 'anticipates there being too many concurent rebalances' do
         service = double
         service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)
-        allow(service).to receive(:execute).and_raise(IssueRebalancingService::TooManyIssues)
-        expect(IssueRebalancingService).to receive(:new).with(service_param).and_return(service)
-        expect(Gitlab::ErrorTracking).to receive(:log_exception).with(IssueRebalancingService::TooManyIssues, include(project_id: arguments.second, root_namespace_id: arguments.third))
+        allow(service).to receive(:execute).and_raise(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances)
+        expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)
+        expect(Gitlab::ErrorTracking).to receive(:log_exception).with(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances, include(project_id: arguments.second, root_namespace_id: arguments.third))

         described_class.new.perform(*arguments)
       end

       it 'takes no action if the value is nil' do
-        expect(IssueRebalancingService).not_to receive(:new)
+        expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
         expect(Gitlab::ErrorTracking).not_to receive(:log_exception)

         described_class.new.perform # all arguments are nil
@@ -52,7 +40,7 @@ RSpec.describe IssueRebalancingWorker do
     shared_examples 'safely handles non-existent ids' do
       it 'anticipates the inability to find the issue' do
         expect(Gitlab::ErrorTracking).to receive(:log_exception).with(ArgumentError, include(project_id: arguments.second, root_namespace_id: arguments.third))
-        expect(IssueRebalancingService).not_to receive(:new)
+        expect(Issues::RelativePositionRebalancingService).not_to receive(:new)

         described_class.new.perform(*arguments)
       end
diff --git a/spec/workers/namespaceless_project_destroy_worker_spec.rb b/spec/workers/namespaceless_project_destroy_worker_spec.rb
index cd66af82364..93e8415f3bb 100644
--- a/spec/workers/namespaceless_project_destroy_worker_spec.rb
+++ b/spec/workers/namespaceless_project_destroy_worker_spec.rb
@@ -48,12 +48,6 @@ RSpec.describe NamespacelessProjectDestroyWorker do

         subject.perform(project.id)
       end
-
-      it 'does not do anything in Project#legacy_remove_pages method' do
-        expect(Gitlab::PagesTransfer).not_to receive(:new)
-
-        subject.perform(project.id)
-      end
     end

     context 'project forked from another' do
diff --git a/spec/workers/packages/helm/extraction_worker_spec.rb b/spec/workers/packages/helm/extraction_worker_spec.rb
index 258413a3410..daebbda3077 100644
--- a/spec/workers/packages/helm/extraction_worker_spec.rb
+++ b/spec/workers/packages/helm/extraction_worker_spec.rb
@@ -23,10 +23,10 @@ RSpec.describe Packages::Helm::ExtractionWorker, type: :worker do
   subject { described_class.new.perform(channel, package_file_id) }

-  shared_examples 'handling error' do
+  shared_examples 'handling error' do |error_class = Packages::Helm::ExtractFileMetadataService::ExtractionError|
     it 'mark the package as errored', :aggregate_failures do
       expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
-        instance_of(Packages::Helm::ExtractFileMetadataService::ExtractionError),
+        instance_of(error_class),
         project_id: package_file.package.project_id
       )
       expect { subject }
@@ -88,5 +88,15 @@ RSpec.describe Packages::Helm::ExtractionWorker, type: :worker do

       it_behaves_like 'handling error'
     end
+
+    context 'with an invalid Chart.yaml' do
+      before do
+        expect_next_instance_of(Gem::Package::TarReader::Entry) do |entry|
+          expect(entry).to receive(:read).and_return('{}')
+        end
+      end
+
+      it_behaves_like 'handling error', ActiveRecord::RecordInvalid
+    end
   end
 end
diff --git a/spec/workers/pages_remove_worker_spec.rb b/spec/workers/pages_remove_worker_spec.rb
index 864aa763fa9..9d49088b371 100644
--- a/spec/workers/pages_remove_worker_spec.rb
+++ b/spec/workers/pages_remove_worker_spec.rb
@@ -3,23 +3,9 @@
 require 'spec_helper'

 RSpec.describe PagesRemoveWorker do
-  let(:project) { create(:project, path: "my.project")}
-  let!(:domain) { create(:pages_domain, project: project) }
-
-  subject { described_class.new.perform(project.id) }
-
-  before do
-    project.mark_pages_as_deployed
-  end
-
-  it 'deletes published pages' do
-    expect(project.pages_deployed?).to be(true)
-
-    expect_any_instance_of(Gitlab::PagesTransfer).to receive(:rename_project).and_return true
-    expect(PagesWorker).to receive(:perform_in).with(5.minutes, :remove, project.namespace.full_path, anything)
-
-    subject
-
-    expect(project.reload.pages_deployed?).to be(false)
+  it 'does not raise error' do
+    expect do
+      described_class.new.perform(create(:project).id)
+    end.not_to raise_error
   end
 end
diff --git a/spec/workers/post_receive_spec.rb b/spec/workers/post_receive_spec.rb
index c111c3164eb..ddd295215a1 100644
--- a/spec/workers/post_receive_spec.rb
+++ b/spec/workers/post_receive_spec.rb
@@ -22,6 +22,8 @@ RSpec.describe PostReceive do
     create(:project, :repository, auto_cancel_pending_pipelines: 'disabled')
   end

+  let(:job_args) { [gl_repository, key_id, base64_changes] }
+
   def perform(changes: base64_changes)
     described_class.new.perform(gl_repository, key_id, changes)
   end
@@ -282,6 +284,8 @@ RSpec.describe PostReceive do
         end
       end
     end
+
+    it_behaves_like 'an idempotent worker'
   end

   describe '#process_wiki_changes' do
@@ -352,6 +356,8 @@ RSpec.describe PostReceive do
         perform
       end
     end
+
+    it_behaves_like 'an idempotent worker'
   end

   context 'webhook' do
@@ -458,6 +464,8 @@ RSpec.describe PostReceive do
         end
       end
     end
+
+    it_behaves_like 'an idempotent worker'
   end

   context 'with PersonalSnippet' do
@@ -484,5 +492,7 @@ RSpec.describe PostReceive do

       described_class.new.perform(gl_repository, key_id, base64_changes)
     end
+
+    it_behaves_like 'an idempotent worker'
   end
 end
diff --git a/spec/workers/purge_dependency_proxy_cache_worker_spec.rb b/spec/workers/purge_dependency_proxy_cache_worker_spec.rb
index 53f8d1bf5ba..393745958be 100644
--- a/spec/workers/purge_dependency_proxy_cache_worker_spec.rb
+++ b/spec/workers/purge_dependency_proxy_cache_worker_spec.rb
@@ -11,14 +11,9 @@ RSpec.describe PurgeDependencyProxyCacheWorker do
   subject { described_class.new.perform(user.id, group_id) }

-  before do
-    stub_config(dependency_proxy: { enabled: true })
-    group.create_dependency_proxy_setting!(enabled: true)
-  end
-
   describe '#perform' do
-    shared_examples 'returns nil' do
-      it 'returns nil', :aggregate_failures do
+    shared_examples 'not removing blobs and manifests' do
+      it 'does not remove blobs and manifests', :aggregate_failures do
         expect { subject }.not_to change { group.dependency_proxy_blobs.size }
         expect { subject }.not_to change { group.dependency_proxy_manifests.size }
         expect(subject).to be_nil
@@ -43,26 +38,26 @@ RSpec.describe PurgeDependencyProxyCacheWorker do
       end

       context 'when admin mode is disabled' do
-        it_behaves_like 'returns nil'
+        it_behaves_like 'not removing blobs and manifests'
       end
     end

     context 'a non-admin user' do
       let(:user) { create(:user) }

-      it_behaves_like 'returns nil'
+      it_behaves_like 'not removing blobs and manifests'
     end

     context 'an invalid user id' do
       let(:user) { double('User', id: 99999 ) }

-      it_behaves_like 'returns nil'
+      it_behaves_like 'not removing blobs and manifests'
     end

     context 'an invalid group' do
       let(:group_id) { 99999 }

-      it_behaves_like 'returns nil'
+      it_behaves_like 'not removing blobs and manifests'
     end
   end
 end
diff --git a/spec/workers/stuck_ci_jobs_worker_spec.rb b/spec/workers/stuck_ci_jobs_worker_spec.rb
index 84b2d87494e..e0a5d3c6c1c 100644
--- a/spec/workers/stuck_ci_jobs_worker_spec.rb
+++ b/spec/workers/stuck_ci_jobs_worker_spec.rb
@@ -5,311 +5,50 @@ require 'spec_helper'
 RSpec.describe StuckCiJobsWorker do
   include ExclusiveLeaseHelpers

-  let!(:runner) { create :ci_runner }
-  let!(:job) { create :ci_build, runner: runner }
-  let(:worker_lease_key) { StuckCiJobsWorker::EXCLUSIVE_LEASE_KEY }
+  let(:worker_lease_key) { StuckCiJobsWorker::EXCLUSIVE_LEASE_KEY }
   let(:worker_lease_uuid) { SecureRandom.uuid }
-  let(:created_at) { }
-  let(:updated_at) { }
+  let(:worker2) { described_class.new }

   subject(:worker) { described_class.new }

   before do
     stub_exclusive_lease(worker_lease_key, worker_lease_uuid)
-    job_attributes = { status: status }
-    job_attributes[:created_at] = created_at if created_at
-    job_attributes[:updated_at] = updated_at if updated_at
-    job.update!(job_attributes)
   end

-  shared_examples 'job is dropped' do
-    it "changes status" do
-      worker.perform
-      job.reload
-
-      expect(job).to be_failed
-      expect(job).to be_stuck_or_timeout_failure
-    end
-
-    context 'when job have data integrity problem' do
-      it "does drop the job and logs the reason" do
-        job.update_columns(yaml_variables: '[{"key" => "value"}]')
-
-        expect(Gitlab::ErrorTracking).to receive(:track_exception)
-          .with(anything, a_hash_including(build_id: job.id))
-          .once
-          .and_call_original
-
-        worker.perform
-        job.reload
-
-        expect(job).to be_failed
-        expect(job).to be_data_integrity_failure
-      end
-    end
-  end
-
-  shared_examples 'job is unchanged' do
-    before do
-      worker.perform
-      job.reload
-    end
-
-    it "doesn't change status" do
-      expect(job.status).to eq(status)
-    end
-  end
-
-  context 'when job is pending' do
-    let(:status) { 'pending' }
-
-    context 'when job is not stuck' do
-      before do
-        allow_any_instance_of(Ci::Build).to receive(:stuck?).and_return(false)
-      end
-
-      context 'when job was updated_at more than 1 day ago' do
-        let(:updated_at) { 1.5.days.ago }
-
-        context 'when created_at is the same as updated_at' do
-          let(:created_at) { 1.5.days.ago }
-
-          it_behaves_like 'job is dropped'
-        end
-
-        context 'when created_at is before updated_at' do
-          let(:created_at) { 3.days.ago }
-
-          it_behaves_like 'job is dropped'
-        end
-
-        context 'when created_at is outside lookback window' do
-          let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
-          it_behaves_like 'job is unchanged'
-        end
-      end
-
-      context 'when job was updated less than 1 day ago' do
-        let(:updated_at) { 6.hours.ago }
-
-        context 'when created_at is the same as updated_at' do
-          let(:created_at) { 1.5.days.ago }
-
-          it_behaves_like 'job is unchanged'
-        end
-
-        context 'when created_at is before updated_at' do
-          let(:created_at) { 3.days.ago }
-
-          it_behaves_like 'job is unchanged'
-        end
-
-        context 'when created_at is outside lookback window' do
-          let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
-          it_behaves_like 'job is unchanged'
-        end
-      end
-
-      context 'when job was updated more than 1 hour ago' do
-        let(:updated_at) { 2.hours.ago }
-
-        context 'when created_at is the same as updated_at' do
-          let(:created_at) { 2.hours.ago }
-
-          it_behaves_like 'job is unchanged'
-        end
-
-        context 'when created_at is before updated_at' do
-          let(:created_at) { 3.days.ago }
-
-          it_behaves_like 'job is unchanged'
-        end
-
-        context 'when created_at is outside lookback window' do
-          let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
-          it_behaves_like 'job is unchanged'
-        end
-      end
-    end
-
-    context 'when job is stuck' do
-      before do
-        allow_any_instance_of(Ci::Build).to receive(:stuck?).and_return(true)
-      end
-
-      context 'when job was updated_at more than 1 hour ago' do
-        let(:updated_at) { 1.5.hours.ago }
-
-        context 'when created_at is the same as updated_at' do
-          let(:created_at) { 1.5.hours.ago }
-
-          it_behaves_like 'job is dropped'
-        end
-
-        context 'when created_at is before updated_at' do
-          let(:created_at) { 3.days.ago }
-
-          it_behaves_like 'job is dropped'
-        end
-
-        context 'when created_at is outside lookback window' do
-          let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
-          it_behaves_like 'job is unchanged'
-        end
-      end
-
-      context 'when job was updated in less than 1 hour ago' do
-        let(:updated_at) { 30.minutes.ago }
-
-        context 'when created_at is the same as updated_at' do
-          let(:created_at) { 30.minutes.ago }
-
-          it_behaves_like 'job is unchanged'
-        end
-
-        context 'when created_at is before updated_at' do
-          let(:created_at) { 2.days.ago }
-
-          it_behaves_like 'job is unchanged'
-        end
-
-        context 'when created_at is outside lookback window' do
-          let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
-          it_behaves_like 'job is unchanged'
-        end
-      end
-    end
-  end
-
-  context 'when job is running' do
-    let(:status) { 'running' }
-
-    context 'when job was updated_at more than an hour ago' do
-      let(:updated_at) { 2.hours.ago }
-
-      it_behaves_like 'job is dropped'
-    end
-
-    context 'when job was updated in less than 1 hour ago' do
-      let(:updated_at) { 30.minutes.ago }
-
-      it_behaves_like 'job is unchanged'
-    end
-  end
-
-  %w(success skipped failed canceled).each do |status|
-    context "when job is #{status}" do
-      let(:status) { status }
-      let(:updated_at) { 2.days.ago }
-
-      context 'when created_at is the same as updated_at' do
-        let(:created_at) { 2.days.ago }
-
-        it_behaves_like 'job is unchanged'
-      end
-
-      context 'when created_at is before updated_at' do
-        let(:created_at) { 3.days.ago }
-
-        it_behaves_like 'job is unchanged'
-      end
-
-      context 'when created_at is outside lookback window' do
-        let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
-        it_behaves_like 'job is unchanged'
-      end
-    end
-  end
-
-  context 'for deleted project' do
-    let(:status) { 'running' }
-    let(:updated_at) { 2.days.ago }
-
-    before do
-      job.project.update!(pending_delete: true)
-    end
-
-    it 'does drop job' do
-      expect_any_instance_of(Ci::Build).to receive(:drop).and_call_original
-      worker.perform
-    end
-  end
-
-  describe 'drop stale scheduled builds' do
-    let(:status) { 'scheduled' }
-    let(:updated_at) { }
-
-    context 'when scheduled at 2 hours ago but it is not executed yet' do
-      let!(:job) { create(:ci_build, :scheduled, scheduled_at: 2.hours.ago) }
-
-      it 'drops the stale scheduled build' do
-        expect(Ci::Build.scheduled.count).to eq(1)
-        expect(job).to be_scheduled
-
-        worker.perform
-        job.reload
-
-        expect(Ci::Build.scheduled.count).to eq(0)
-        expect(job).to be_failed
-        expect(job).to be_stale_schedule
-      end
-    end
-
-    context 'when scheduled at 30 minutes ago but it is not executed yet' do
-      let!(:job) { create(:ci_build, :scheduled, scheduled_at: 30.minutes.ago) }
-
-      it 'does not drop the stale scheduled build yet' do
-        expect(Ci::Build.scheduled.count).to eq(1)
-        expect(job).to be_scheduled
-
-        worker.perform
-
-        expect(Ci::Build.scheduled.count).to eq(1)
-        expect(job).to be_scheduled
-      end
-    end
-
-    context 'when there are no stale scheduled builds' do
-      it 'does not drop the stale scheduled build yet' do
-        expect { worker.perform }.not_to raise_error
-      end
-    end
-  end
-
-  describe 'exclusive lease' do
-    let(:status) { 'running' }
-    let(:updated_at) { 2.days.ago }
-    let(:worker2) { described_class.new }
-
-    it 'is guard by exclusive lease when executed concurrently' do
-      expect(worker).to receive(:drop).at_least(:once).and_call_original
-      expect(worker2).not_to receive(:drop)
-
-      worker.perform
-
-      stub_exclusive_lease_taken(worker_lease_key)
-
-      worker2.perform
-    end
-
-    it 'can be executed in sequence' do
-      expect(worker).to receive(:drop).at_least(:once).and_call_original
-      expect(worker2).to receive(:drop).at_least(:once).and_call_original
-
-      worker.perform
-      worker2.perform
-    end
-
-    it 'cancels exclusive leases after worker perform' do
-      expect_to_cancel_exclusive_lease(worker_lease_key, worker_lease_uuid)
-
-      worker.perform
-    end
-  end
+  describe '#perform' do
+    it 'executes an instance of Ci::StuckBuildsDropService' do
+      expect_next_instance_of(Ci::StuckBuilds::DropService) do |service|
+        expect(service).to receive(:execute).exactly(:once)
+      end
+
+      worker.perform
+    end
+
+    context 'with an exclusive lease' do
+      it 'does not execute concurrently' do
+        expect(worker).to receive(:remove_lease).exactly(:once)
+        expect(worker2).not_to receive(:remove_lease)
+
+        worker.perform
+
+        stub_exclusive_lease_taken(worker_lease_key)
+
+        worker2.perform
+      end
+
+      it 'can execute in sequence' do
+        expect(worker).to receive(:remove_lease).at_least(:once)
+        expect(worker2).to receive(:remove_lease).at_least(:once)
+
+        worker.perform
+        worker2.perform
+      end
+
+      it 'cancels exclusive leases after worker perform' do
+        expect_to_cancel_exclusive_lease(worker_lease_key, worker_lease_uuid)
+
+        worker.perform
+      end
+    end
+  end
 end
diff --git a/spec/workers/todos_destroyer/destroyed_designs_worker_spec.rb b/spec/workers/todos_destroyer/destroyed_designs_worker_spec.rb
new file mode 100644
index 00000000000..113faeb0d2f
--- /dev/null
+++ b/spec/workers/todos_destroyer/destroyed_designs_worker_spec.rb
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe TodosDestroyer::DestroyedDesignsWorker do
+  let(:service) { double }
+
+  it 'calls the Todos::Destroy::DesignService with design_ids parameter' do
+    expect(::Todos::Destroy::DesignService).to receive(:new).with([1, 5]).and_return(service)
+    expect(service).to receive(:execute)
+
+    described_class.new.perform([1, 5])
+  end
+end