author    | GitLab Bot <gitlab-bot@gitlab.com> | 2021-09-20 13:18:24 +0000
committer | GitLab Bot <gitlab-bot@gitlab.com> | 2021-09-20 13:18:24 +0000
commit    | 0653e08efd039a5905f3fa4f6e9cef9f5d2f799c (patch)
tree      | 4dcc884cf6d81db44adae4aa99f8ec1233a41f55 /spec/services/ci
parent    | 744144d28e3e7fddc117924fef88de5d9674fe4c (diff)
download  | gitlab-ce-0653e08efd039a5905f3fa4f6e9cef9f5d2f799c.tar.gz
Add latest changes from gitlab-org/gitlab@14-3-stable-ee (tag: v14.3.0-rc42)
Diffstat (limited to 'spec/services/ci')
13 files changed, 813 insertions, 140 deletions
diff --git a/spec/services/ci/after_requeue_job_service_spec.rb b/spec/services/ci/after_requeue_job_service_spec.rb index df5ddcafb37..2a5a971fdac 100644 --- a/spec/services/ci/after_requeue_job_service_spec.rb +++ b/spec/services/ci/after_requeue_job_service_spec.rb @@ -44,16 +44,6 @@ RSpec.describe Ci::AfterRequeueJobService do it 'marks subsequent skipped jobs as processable' do expect { execute_service }.to change { test4.reload.status }.from('skipped').to('created') end - - context 'with ci_same_stage_job_needs FF disabled' do - before do - stub_feature_flags(ci_same_stage_job_needs: false) - end - - it 'does nothing with the build' do - expect { execute_service }.not_to change { test4.reload.status } - end - end end context 'when the pipeline is a downstream pipeline and the bridge is depended' do diff --git a/spec/services/ci/archive_trace_service_spec.rb b/spec/services/ci/archive_trace_service_spec.rb index 12804efc28c..071b5c3b2f9 100644 --- a/spec/services/ci/archive_trace_service_spec.rb +++ b/spec/services/ci/archive_trace_service_spec.rb @@ -12,6 +12,7 @@ RSpec.describe Ci::ArchiveTraceService, '#execute' do expect { subject }.not_to raise_error expect(job.reload.job_artifacts_trace).to be_exist + expect(job.trace_metadata.trace_artifact).to eq(job.job_artifacts_trace) end context 'when trace is already archived' do @@ -27,7 +28,7 @@ RSpec.describe Ci::ArchiveTraceService, '#execute' do context 'when live trace chunks still exist' do before do - create(:ci_build_trace_chunk, build: job) + create(:ci_build_trace_chunk, build: job, chunk_index: 0) end it 'removes the trace chunks' do @@ -39,8 +40,14 @@ RSpec.describe Ci::ArchiveTraceService, '#execute' do job.job_artifacts_trace.file.remove! end - it 'removes the trace artifact' do - expect { subject }.to change { job.reload.job_artifacts_trace }.to(nil) + it 'removes the trace artifact and builds a new one' do + existing_trace = job.job_artifacts_trace + expect(existing_trace).to receive(:destroy!).and_call_original + + subject + + expect(job.reload.job_artifacts_trace).to be_present + expect(job.reload.job_artifacts_trace.file.file).to be_present end end end @@ -59,6 +66,54 @@ RSpec.describe Ci::ArchiveTraceService, '#execute' do end end + context 'when the job is out of archival attempts' do + before do + create(:ci_build_trace_metadata, + build: job, + archival_attempts: Ci::BuildTraceMetadata::MAX_ATTEMPTS + 1, + last_archival_attempt_at: 1.week.ago) + end + + it 'skips archiving' do + expect(job.trace).not_to receive(:archive!) + + subject + end + + it 'leaves a warning message in sidekiq log' do + expect(Sidekiq.logger).to receive(:warn).with( + class: Ci::ArchiveTraceWorker.name, + message: 'The job is out of archival attempts.', + job_id: job.id) + + subject + end + end + + context 'when the archival process is backed off' do + before do + create(:ci_build_trace_metadata, + build: job, + archival_attempts: Ci::BuildTraceMetadata::MAX_ATTEMPTS - 1, + last_archival_attempt_at: 1.hour.ago) + end + + it 'skips archiving' do + expect(job.trace).not_to receive(:archive!) 
+ + subject + end + + it 'leaves a warning message in sidekiq log' do + expect(Sidekiq.logger).to receive(:warn).with( + class: Ci::ArchiveTraceWorker.name, + message: 'The job can not be archived right now.', + job_id: job.id) + + subject + end + end + context 'when job failed to archive trace but did not raise an exception' do before do allow_next_instance_of(Gitlab::Ci::Trace) do |instance| @@ -98,6 +153,7 @@ RSpec.describe Ci::ArchiveTraceService, '#execute' do .and_call_original expect { subject }.not_to raise_error + expect(job.trace_metadata.archival_attempts).to eq(1) end end end diff --git a/spec/services/ci/create_pipeline_service/parent_child_pipeline_spec.rb b/spec/services/ci/create_pipeline_service/parent_child_pipeline_spec.rb index 6eb1315fff4..4326fa5533f 100644 --- a/spec/services/ci/create_pipeline_service/parent_child_pipeline_spec.rb +++ b/spec/services/ci/create_pipeline_service/parent_child_pipeline_spec.rb @@ -127,6 +127,32 @@ RSpec.describe Ci::CreatePipelineService, '#execute' do end end end + + context 'when resource group key includes a variable' do + let(:config) do + <<~YAML + instrumentation_test: + stage: test + resource_group: $CI_ENVIRONMENT_NAME + trigger: + include: path/to/child.yml + strategy: depend + YAML + end + + it 'ignores the resource group keyword because it fails to expand the variable', :aggregate_failures do + pipeline = create_pipeline! + Ci::InitialPipelineProcessWorker.new.perform(pipeline.id) + + test = pipeline.statuses.find_by(name: 'instrumentation_test') + expect(pipeline).to be_created_successfully + expect(pipeline.triggered_pipelines).not_to be_exist + expect(project.resource_groups.count).to eq(0) + expect(test).to be_a Ci::Bridge + expect(test).to be_pending + expect(test.resource_group).to be_nil + end + end end end diff --git a/spec/services/ci/create_pipeline_service/tags_spec.rb b/spec/services/ci/create_pipeline_service/tags_spec.rb new file mode 100644 index 00000000000..335d35010c8 --- /dev/null +++ b/spec/services/ci/create_pipeline_service/tags_spec.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true +require 'spec_helper' + +RSpec.describe Ci::CreatePipelineService do + describe 'tags:' do + let_it_be(:project) { create(:project, :repository) } + let_it_be(:user) { project.owner } + + let(:ref) { 'refs/heads/master' } + let(:source) { :push } + let(:service) { described_class.new(project, user, { ref: ref }) } + let(:pipeline) { service.execute(source).payload } + + before do + stub_ci_pipeline_yaml_file(config) + end + + context 'with valid config' do + let(:config) { YAML.dump({ test: { script: 'ls', tags: %w[tag1 tag2] } }) } + + it 'creates a pipeline', :aggregate_failures do + expect(pipeline).to be_created_successfully + expect(pipeline.builds.first.tag_list).to match_array(%w[tag1 tag2]) + end + end + + context 'with too many tags' do + let(:tags) { Array.new(50) {|i| "tag-#{i}" } } + let(:config) { YAML.dump({ test: { script: 'ls', tags: tags } }) } + + it 'creates a pipeline without builds', :aggregate_failures do + expect(pipeline).not_to be_created_successfully + expect(pipeline.builds).to be_empty + expect(pipeline.yaml_errors).to eq("jobs:test:tags config must be less than the limit of #{Gitlab::Ci::Config::Entry::Tags::TAGS_LIMIT} tags") + end + end + end +end diff --git a/spec/services/ci/create_pipeline_service_spec.rb b/spec/services/ci/create_pipeline_service_spec.rb index 2fdb0ed3c0d..78646665539 100644 --- a/spec/services/ci/create_pipeline_service_spec.rb +++ 
b/spec/services/ci/create_pipeline_service_spec.rb @@ -11,7 +11,7 @@ RSpec.describe Ci::CreatePipelineService do let(:ref_name) { 'refs/heads/master' } before do - stub_ci_pipeline_yaml_file(gitlab_ci_yaml) + stub_ci_pipeline_to_return_yaml_file end describe '#execute' do @@ -991,6 +991,58 @@ RSpec.describe Ci::CreatePipelineService do end end + context 'when resource group is defined for review app deployment' do + before do + config = YAML.dump( + review_app: { + stage: 'test', + script: 'deploy', + environment: { + name: 'review/$CI_COMMIT_REF_SLUG', + on_stop: 'stop_review_app' + }, + resource_group: '$CI_ENVIRONMENT_NAME' + }, + stop_review_app: { + stage: 'test', + script: 'stop', + when: 'manual', + environment: { + name: 'review/$CI_COMMIT_REF_SLUG', + action: 'stop' + }, + resource_group: '$CI_ENVIRONMENT_NAME' + } + ) + + stub_ci_pipeline_yaml_file(config) + end + + it 'persists the association correctly' do + result = execute_service.payload + deploy_job = result.builds.find_by_name!(:review_app) + stop_job = result.builds.find_by_name!(:stop_review_app) + + expect(result).to be_persisted + expect(deploy_job.resource_group.key).to eq('review/master') + expect(stop_job.resource_group.key).to eq('review/master') + expect(project.resource_groups.count).to eq(1) + end + + it 'initializes scoped variables only once for each build' do + # Bypassing `stub_build` hack because it distrubs the expectations below. + allow_next_instances_of(Gitlab::Ci::Build::Context::Build, 2) do |build_context| + allow(build_context).to receive(:variables) { Gitlab::Ci::Variables::Collection.new } + end + + expect_next_instances_of(::Ci::Build, 2) do |ci_build| + expect(ci_build).to receive(:scoped_variables).once.and_call_original + end + + expect(execute_service.payload).to be_created_successfully + end + end + context 'with timeout' do context 'when builds with custom timeouts are configured' do before do @@ -1248,16 +1300,47 @@ RSpec.describe Ci::CreatePipelineService do end context 'when pipeline variables are specified' do - let(:variables_attributes) do - [{ key: 'first', secret_value: 'world' }, - { key: 'second', secret_value: 'second_world' }] + subject(:pipeline) { execute_service(variables_attributes: variables_attributes).payload } + + context 'with valid pipeline variables' do + let(:variables_attributes) do + [{ key: 'first', secret_value: 'world' }, + { key: 'second', secret_value: 'second_world' }] + end + + it 'creates a pipeline with specified variables' do + expect(pipeline.variables.map { |var| var.slice(:key, :secret_value) }) + .to eq variables_attributes.map(&:with_indifferent_access) + end end - subject(:pipeline) { execute_service(variables_attributes: variables_attributes).payload } + context 'with duplicate pipeline variables' do + let(:variables_attributes) do + [{ key: 'hello', secret_value: 'world' }, + { key: 'hello', secret_value: 'second_world' }] + end - it 'creates a pipeline with specified variables' do - expect(pipeline.variables.map { |var| var.slice(:key, :secret_value) }) - .to eq variables_attributes.map(&:with_indifferent_access) + it 'fails to create the pipeline' do + expect(pipeline).to be_failed + expect(pipeline.variables).to be_empty + expect(pipeline.errors[:base]).to eq(['Duplicate variable name: hello']) + end + end + + context 'with more than one duplicate pipeline variable' do + let(:variables_attributes) do + [{ key: 'hello', secret_value: 'world' }, + { key: 'hello', secret_value: 'second_world' }, + { key: 'single', secret_value: 'variable' }, + { 
key: 'other', secret_value: 'value' }, + { key: 'other', secret_value: 'other value' }] + end + + it 'fails to create the pipeline' do + expect(pipeline).to be_failed + expect(pipeline.variables).to be_empty + expect(pipeline.errors[:base]).to eq(['Duplicate variable names: hello, other']) + end end end diff --git a/spec/services/ci/external_pull_requests/create_pipeline_service_spec.rb b/spec/services/ci/external_pull_requests/create_pipeline_service_spec.rb index 2b310443b37..04d75630295 100644 --- a/spec/services/ci/external_pull_requests/create_pipeline_service_spec.rb +++ b/spec/services/ci/external_pull_requests/create_pipeline_service_spec.rb @@ -12,7 +12,7 @@ RSpec.describe Ci::ExternalPullRequests::CreatePipelineService do project.add_maintainer(user) end - subject(:response) { described_class.new(project, user).execute(pull_request) } + subject(:execute) { described_class.new(project, user).execute(pull_request) } context 'when pull request is open' do before do @@ -21,26 +21,43 @@ RSpec.describe Ci::ExternalPullRequests::CreatePipelineService do context 'when source sha is the head of the source branch' do let(:source_branch) { project.repository.branches.last } - let(:create_pipeline_service) { instance_double(Ci::CreatePipelineService) } before do pull_request.update!(source_branch: source_branch.name, source_sha: source_branch.target) end - it 'creates a pipeline for external pull request', :aggregate_failures do - pipeline = response.payload - - expect(response).to be_success - expect(pipeline).to be_valid - expect(pipeline).to be_persisted - expect(pipeline).to be_external_pull_request_event - expect(pipeline).to eq(project.ci_pipelines.last) - expect(pipeline.external_pull_request).to eq(pull_request) - expect(pipeline.user).to eq(user) - expect(pipeline.status).to eq('created') - expect(pipeline.ref).to eq(pull_request.source_branch) - expect(pipeline.sha).to eq(pull_request.source_sha) - expect(pipeline.source_sha).to eq(pull_request.source_sha) + context 'when the FF ci_create_external_pr_pipeline_async is disabled' do + before do + stub_feature_flags(ci_create_external_pr_pipeline_async: false) + end + + it 'creates a pipeline for external pull request', :aggregate_failures do + pipeline = execute.payload + + expect(execute).to be_success + expect(pipeline).to be_valid + expect(pipeline).to be_persisted + expect(pipeline).to be_external_pull_request_event + expect(pipeline).to eq(project.ci_pipelines.last) + expect(pipeline.external_pull_request).to eq(pull_request) + expect(pipeline.user).to eq(user) + expect(pipeline.status).to eq('created') + expect(pipeline.ref).to eq(pull_request.source_branch) + expect(pipeline.sha).to eq(pull_request.source_sha) + expect(pipeline.source_sha).to eq(pull_request.source_sha) + end + end + + it 'enqueues Ci::ExternalPullRequests::CreatePipelineWorker' do + expect { execute } + .to change { ::Ci::ExternalPullRequests::CreatePipelineWorker.jobs.count } + .by(1) + + args = ::Ci::ExternalPullRequests::CreatePipelineWorker.jobs.last['args'] + + expect(args[0]).to eq(project.id) + expect(args[1]).to eq(user.id) + expect(args[2]).to eq(pull_request.id) end end @@ -53,11 +70,12 @@ RSpec.describe Ci::ExternalPullRequests::CreatePipelineService do end it 'does nothing', :aggregate_failures do - expect(Ci::CreatePipelineService).not_to receive(:new) + expect { execute } + .not_to change { ::Ci::ExternalPullRequests::CreatePipelineWorker.jobs.count } - expect(response).to be_error - expect(response.message).to eq('The source sha is not the head 
of the source branch') - expect(response.payload).to be_nil + expect(execute).to be_error + expect(execute.message).to eq('The source sha is not the head of the source branch') + expect(execute.payload).to be_nil end end end @@ -68,11 +86,12 @@ RSpec.describe Ci::ExternalPullRequests::CreatePipelineService do end it 'does nothing', :aggregate_failures do - expect(Ci::CreatePipelineService).not_to receive(:new) + expect { execute } + .not_to change { ::Ci::ExternalPullRequests::CreatePipelineWorker.jobs.count } - expect(response).to be_error - expect(response.message).to eq('The pull request is not opened') - expect(response.payload).to be_nil + expect(execute).to be_error + expect(execute.message).to eq('The pull request is not opened') + expect(execute.payload).to be_nil end end end diff --git a/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb b/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb index 04fa55068f2..7a91ad9dcc1 100644 --- a/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb +++ b/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb @@ -10,20 +10,16 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s describe '.execute' do subject { service.execute } - let_it_be(:artifact, refind: true) do - create(:ci_job_artifact, expire_at: 1.day.ago) - end - - before(:all) do - artifact.job.pipeline.unlocked! - end + let_it_be(:locked_pipeline) { create(:ci_pipeline, :artifacts_locked) } + let_it_be(:pipeline) { create(:ci_pipeline, :unlocked) } + let_it_be(:locked_job) { create(:ci_build, :success, pipeline: locked_pipeline) } + let_it_be(:job) { create(:ci_build, :success, pipeline: pipeline) } context 'when artifact is expired' do + let!(:artifact) { create(:ci_job_artifact, :expired, job: job) } + context 'with preloaded relationships' do before do - job = create(:ci_build, pipeline: artifact.job.pipeline) - create(:ci_job_artifact, :archive, :expired, job: job) - stub_const("#{described_class}::LOOP_LIMIT", 1) end @@ -39,7 +35,7 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s # COMMIT # SELECT next expired ci_job_artifacts - expect(log.count).to be_within(1).of(11) + expect(log.count).to be_within(1).of(10) end end @@ -48,7 +44,7 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s expect { subject }.to change { Ci::JobArtifact.count }.by(-1) end - context 'when the artifact does not a file attached to it' do + context 'when the artifact does not have a file attached to it' do it 'does not create deleted objects' do expect(artifact.exists?).to be_falsy # sanity check @@ -57,10 +53,7 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s end context 'when the artifact has a file attached to it' do - before do - artifact.file = fixture_file_upload(Rails.root.join('spec/fixtures/ci_build_artifacts.zip'), 'application/zip') - artifact.save! - end + let!(:artifact) { create(:ci_job_artifact, :expired, :zip, job: job) } it 'creates a deleted object' do expect { subject }.to change { Ci::DeletedObject.count }.by(1) @@ -81,9 +74,7 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s end context 'when artifact is locked' do - before do - artifact.job.pipeline.artifacts_locked! 
- end + let!(:artifact) { create(:ci_job_artifact, :expired, job: locked_job) } it 'does not destroy job artifact' do expect { subject }.not_to change { Ci::JobArtifact.count } @@ -92,9 +83,7 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s end context 'when artifact is not expired' do - before do - artifact.update_column(:expire_at, 1.day.since) - end + let!(:artifact) { create(:ci_job_artifact, job: job) } it 'does not destroy expired job artifacts' do expect { subject }.not_to change { Ci::JobArtifact.count } @@ -102,9 +91,7 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s end context 'when artifact is permanent' do - before do - artifact.update_column(:expire_at, nil) - end + let!(:artifact) { create(:ci_job_artifact, expire_at: nil, job: job) } it 'does not destroy expired job artifacts' do expect { subject }.not_to change { Ci::JobArtifact.count } @@ -112,6 +99,8 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s end context 'when failed to destroy artifact' do + let!(:artifact) { create(:ci_job_artifact, :expired, job: job) } + before do stub_const("#{described_class}::LOOP_LIMIT", 10) end @@ -146,58 +135,67 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s end context 'when exclusive lease has already been taken by the other instance' do + let!(:artifact) { create(:ci_job_artifact, :expired, job: job) } + before do stub_exclusive_lease_taken(described_class::EXCLUSIVE_LOCK_KEY, timeout: described_class::LOCK_TIMEOUT) end it 'raises an error and does not start destroying' do expect { subject }.to raise_error(Gitlab::ExclusiveLeaseHelpers::FailedToObtainLockError) + .and not_change { Ci::JobArtifact.count }.from(1) end end - context 'when timeout happens' do - let!(:second_artifact) { create(:ci_job_artifact, expire_at: 1.day.ago) } + context 'with a second artifact and batch size of 1' do + let(:second_job) { create(:ci_build, :success, pipeline: pipeline) } + let!(:second_artifact) { create(:ci_job_artifact, :archive, expire_at: 1.day.ago, job: second_job) } + let!(:artifact) { create(:ci_job_artifact, :expired, job: job) } before do - stub_const("#{described_class}::LOOP_TIMEOUT", 0.seconds) stub_const("#{described_class}::BATCH_SIZE", 1) - - second_artifact.job.pipeline.unlocked! end - it 'destroys one artifact' do - expect { subject }.to change { Ci::JobArtifact.count }.by(-1) - end - - it 'reports the number of destroyed artifacts' do - is_expected.to eq(1) - end - end + context 'when timeout happens' do + before do + stub_const("#{described_class}::LOOP_TIMEOUT", 0.seconds) + end - context 'when loop reached loop limit' do - before do - stub_const("#{described_class}::LOOP_LIMIT", 1) - stub_const("#{described_class}::BATCH_SIZE", 1) + it 'destroys one artifact' do + expect { subject }.to change { Ci::JobArtifact.count }.by(-1) + end - second_artifact.job.pipeline.unlocked! 
+ it 'reports the number of destroyed artifacts' do + is_expected.to eq(1) + end end - let!(:second_artifact) { create(:ci_job_artifact, expire_at: 1.day.ago) } + context 'when loop reached loop limit' do + before do + stub_const("#{described_class}::LOOP_LIMIT", 1) + end - it 'destroys one artifact' do - expect { subject }.to change { Ci::JobArtifact.count }.by(-1) + it 'destroys one artifact' do + expect { subject }.to change { Ci::JobArtifact.count }.by(-1) + end + + it 'reports the number of destroyed artifacts' do + is_expected.to eq(1) + end end - it 'reports the number of destroyed artifacts' do - is_expected.to eq(1) + context 'when the number of artifacts is greater than than batch size' do + it 'destroys all expired artifacts' do + expect { subject }.to change { Ci::JobArtifact.count }.by(-2) + end + + it 'reports the number of destroyed artifacts' do + is_expected.to eq(2) + end end end context 'when there are no artifacts' do - before do - artifact.destroy! - end - it 'does not raise error' do expect { subject }.not_to raise_error end @@ -207,42 +205,18 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s end end - context 'when there are artifacts more than batch sizes' do - before do - stub_const("#{described_class}::BATCH_SIZE", 1) - - second_artifact.job.pipeline.unlocked! - end - - let!(:second_artifact) { create(:ci_job_artifact, expire_at: 1.day.ago) } - - it 'destroys all expired artifacts' do - expect { subject }.to change { Ci::JobArtifact.count }.by(-2) - end - - it 'reports the number of destroyed artifacts' do - is_expected.to eq(2) - end - end - context 'when some artifacts are locked' do - before do - pipeline = create(:ci_pipeline, locked: :artifacts_locked) - job = create(:ci_build, pipeline: pipeline) - create(:ci_job_artifact, expire_at: 1.day.ago, job: job) - end + let!(:artifact) { create(:ci_job_artifact, :expired, job: job) } + let!(:locked_artifact) { create(:ci_job_artifact, :expired, job: locked_job) } it 'destroys only unlocked artifacts' do expect { subject }.to change { Ci::JobArtifact.count }.by(-1) + expect(locked_artifact).to be_persisted end end context 'when all artifacts are locked' do - before do - pipeline = create(:ci_pipeline, locked: :artifacts_locked) - job = create(:ci_build, pipeline: pipeline) - artifact.update!(job: job) - end + let!(:artifact) { create(:ci_job_artifact, :expired, job: locked_job) } it 'destroys no artifacts' do expect { subject }.to change { Ci::JobArtifact.count }.by(0) diff --git a/spec/services/ci/pipeline_processing/shared_processing_service.rb b/spec/services/ci/pipeline_processing/shared_processing_service.rb index a4bc8e68b2d..8de9b308429 100644 --- a/spec/services/ci/pipeline_processing/shared_processing_service.rb +++ b/spec/services/ci/pipeline_processing/shared_processing_service.rb @@ -908,6 +908,39 @@ RSpec.shared_examples 'Pipeline Processing Service' do end end + context 'when a bridge job has invalid downstream project', :sidekiq_inline do + let(:config) do + <<-EOY + test: + stage: test + script: echo test + + deploy: + stage: deploy + trigger: + project: invalid-project + EOY + end + + let(:pipeline) do + Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload + end + + before do + stub_ci_pipeline_yaml_file(config) + end + + it 'creates a pipeline, then fails the bridge job' do + expect(all_builds_names).to contain_exactly('test', 'deploy') + expect(all_builds_statuses).to contain_exactly('pending', 'created') + + succeed_pending + + 
expect(all_builds_names).to contain_exactly('test', 'deploy') + expect(all_builds_statuses).to contain_exactly('success', 'failed') + end + end + private def all_builds diff --git a/spec/services/ci/pipeline_trigger_service_spec.rb b/spec/services/ci/pipeline_trigger_service_spec.rb index 2f93b1ecd3c..29d12b0dd0e 100644 --- a/spec/services/ci/pipeline_trigger_service_spec.rb +++ b/spec/services/ci/pipeline_trigger_service_spec.rb @@ -103,6 +103,17 @@ RSpec.describe Ci::PipelineTriggerService do end end + context 'when params have duplicate variables' do + let(:params) { { token: trigger.token, ref: 'master', variables: variables } } + let(:variables) { { 'TRIGGER_PAYLOAD' => 'duplicate value' } } + + it 'creates a failed pipeline without variables' do + expect { result }.to change { Ci::Pipeline.count } + expect(result).to be_error + expect(result.message[:base]).to eq(['Duplicate variable name: TRIGGER_PAYLOAD']) + end + end + it_behaves_like 'detecting an unprocessable pipeline trigger' end @@ -201,6 +212,17 @@ RSpec.describe Ci::PipelineTriggerService do end end + context 'when params have duplicate variables' do + let(:params) { { token: job.token, ref: 'master', variables: variables } } + let(:variables) { { 'TRIGGER_PAYLOAD' => 'duplicate value' } } + + it 'creates a failed pipeline without variables' do + expect { result }.to change { Ci::Pipeline.count } + expect(result).to be_error + expect(result.message[:base]).to eq(['Duplicate variable name: TRIGGER_PAYLOAD']) + end + end + it_behaves_like 'detecting an unprocessable pipeline trigger' end diff --git a/spec/services/ci/pipelines/add_job_service_spec.rb b/spec/services/ci/pipelines/add_job_service_spec.rb index bdf7e577fa8..3a77d26dd9e 100644 --- a/spec/services/ci/pipelines/add_job_service_spec.rb +++ b/spec/services/ci/pipelines/add_job_service_spec.rb @@ -59,18 +59,6 @@ RSpec.describe Ci::Pipelines::AddJobService do end end - context 'when the FF ci_fix_commit_status_retried is disabled' do - before do - stub_feature_flags(ci_fix_commit_status_retried: false) - end - - it 'does not call update_older_statuses_retried!' do - expect(job).not_to receive(:update_older_statuses_retried!) - - execute - end - end - context 'exclusive lock' do let(:lock_uuid) { 'test' } let(:lock_key) { "ci:pipelines:#{pipeline.id}:add-job" } diff --git a/spec/services/ci/register_job_service_spec.rb b/spec/services/ci/register_job_service_spec.rb index 2f37d0ea42d..73ff15ec393 100644 --- a/spec/services/ci/register_job_service_spec.rb +++ b/spec/services/ci/register_job_service_spec.rb @@ -40,12 +40,16 @@ module Ci context 'runner follow tag list' do it "picks build with the same tag" do pending_job.update!(tag_list: ["linux"]) + pending_job.reload + pending_job.create_queuing_entry! specific_runner.update!(tag_list: ["linux"]) expect(execute(specific_runner)).to eq(pending_job) end it "does not pick build with different tag" do pending_job.update!(tag_list: ["linux"]) + pending_job.reload + pending_job.create_queuing_entry! specific_runner.update!(tag_list: ["win32"]) expect(execute(specific_runner)).to be_falsey end @@ -56,6 +60,8 @@ module Ci it "does not pick build with tag" do pending_job.update!(tag_list: ["linux"]) + pending_job.reload + pending_job.create_queuing_entry! 
expect(execute(specific_runner)).to be_falsey end @@ -81,8 +87,30 @@ module Ci end context 'for specific runner' do - it 'does not pick a build' do - expect(execute(specific_runner)).to be_nil + context 'with FF disabled' do + before do + stub_feature_flags( + ci_pending_builds_project_runners_decoupling: false, + ci_queueing_builds_enabled_checks: false) + end + + it 'does not pick a build' do + expect(execute(specific_runner)).to be_nil + end + end + + context 'with FF enabled' do + before do + stub_feature_flags( + ci_pending_builds_project_runners_decoupling: true, + ci_queueing_builds_enabled_checks: true) + end + + it 'does not pick a build' do + expect(execute(specific_runner)).to be_nil + expect(pending_job.reload).to be_failed + expect(pending_job.queuing_entry).to be_nil + end end end end @@ -219,6 +247,8 @@ module Ci before do project.update!(shared_runners_enabled: true, group_runners_enabled: true) project.project_feature.update_attribute(:builds_access_level, ProjectFeature::DISABLED) + + pending_job.reload.create_queuing_entry! end context 'and uses shared runner' do @@ -236,7 +266,29 @@ module Ci context 'and uses project runner' do let(:build) { execute(specific_runner) } - it { expect(build).to be_nil } + context 'with FF disabled' do + before do + stub_feature_flags( + ci_pending_builds_project_runners_decoupling: false, + ci_queueing_builds_enabled_checks: false) + end + + it { expect(build).to be_nil } + end + + context 'with FF enabled' do + before do + stub_feature_flags( + ci_pending_builds_project_runners_decoupling: true, + ci_queueing_builds_enabled_checks: true) + end + + it 'does not pick a build' do + expect(build).to be_nil + expect(pending_job.reload).to be_failed + expect(pending_job.queuing_entry).to be_nil + end + end end end @@ -304,6 +356,8 @@ module Ci context 'disallow group runners' do before do project.update!(group_runners_enabled: false) + + pending_job.reload.create_queuing_entry! 
end context 'group runner' do @@ -739,6 +793,30 @@ module Ci include_examples 'handles runner assignment' end + + context 'with ci_queueing_denormalize_tags_information enabled' do + before do + stub_feature_flags(ci_queueing_denormalize_tags_information: true) + end + + include_examples 'handles runner assignment' + end + + context 'with ci_queueing_denormalize_tags_information disabled' do + before do + stub_feature_flags(ci_queueing_denormalize_tags_information: false) + end + + include_examples 'handles runner assignment' + end + + context 'with ci_queueing_denormalize_namespace_traversal_ids disabled' do + before do + stub_feature_flags(ci_queueing_denormalize_namespace_traversal_ids: false) + end + + include_examples 'handles runner assignment' + end end context 'when not using pending builds table' do diff --git a/spec/services/ci/stuck_builds/drop_service_spec.rb b/spec/services/ci/stuck_builds/drop_service_spec.rb new file mode 100644 index 00000000000..8dfd1bc1b3d --- /dev/null +++ b/spec/services/ci/stuck_builds/drop_service_spec.rb @@ -0,0 +1,284 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe Ci::StuckBuilds::DropService do + let!(:runner) { create :ci_runner } + let!(:job) { create :ci_build, runner: runner } + let(:created_at) { } + let(:updated_at) { } + + subject(:service) { described_class.new } + + before do + job_attributes = { status: status } + job_attributes[:created_at] = created_at if created_at + job_attributes[:updated_at] = updated_at if updated_at + job.update!(job_attributes) + end + + shared_examples 'job is dropped' do + it 'changes status' do + expect(service).to receive(:drop).exactly(3).times.and_call_original + expect(service).to receive(:drop_stuck).exactly(:once).and_call_original + + service.execute + job.reload + + expect(job).to be_failed + expect(job).to be_stuck_or_timeout_failure + end + + context 'when job have data integrity problem' do + it "does drop the job and logs the reason" do + job.update_columns(yaml_variables: '[{"key" => "value"}]') + + expect(Gitlab::ErrorTracking).to receive(:track_exception) + .with(anything, a_hash_including(build_id: job.id)) + .once + .and_call_original + + service.execute + job.reload + + expect(job).to be_failed + expect(job).to be_data_integrity_failure + end + end + end + + shared_examples 'job is unchanged' do + it 'does not change status' do + expect(service).to receive(:drop).exactly(3).times.and_call_original + expect(service).to receive(:drop_stuck).exactly(:once).and_call_original + + service.execute + job.reload + + expect(job.status).to eq(status) + end + end + + context 'when job is pending' do + let(:status) { 'pending' } + + context 'when job is not stuck' do + before do + allow_next_found_instance_of(Ci::Build) do |build| + allow(build).to receive(:stuck?).and_return(false) + end + end + + context 'when job was updated_at more than 1 day ago' do + let(:updated_at) { 1.5.days.ago } + + context 'when created_at is the same as updated_at' do + let(:created_at) { 1.5.days.ago } + + it_behaves_like 'job is dropped' + end + + context 'when created_at is before updated_at' do + let(:created_at) { 3.days.ago } + + it_behaves_like 'job is dropped' + end + + context 'when created_at is outside lookback window' do + let(:created_at) { described_class::BUILD_LOOKBACK - 1.day } + + it_behaves_like 'job is unchanged' + end + end + + context 'when job was updated less than 1 day ago' do + let(:updated_at) { 6.hours.ago } + + context 'when created_at is the same as updated_at' do + 
let(:created_at) { 1.5.days.ago } + + it_behaves_like 'job is unchanged' + end + + context 'when created_at is before updated_at' do + let(:created_at) { 3.days.ago } + + it_behaves_like 'job is unchanged' + end + + context 'when created_at is outside lookback window' do + let(:created_at) { described_class::BUILD_LOOKBACK - 1.day } + + it_behaves_like 'job is unchanged' + end + end + + context 'when job was updated more than 1 hour ago' do + let(:updated_at) { 2.hours.ago } + + context 'when created_at is the same as updated_at' do + let(:created_at) { 2.hours.ago } + + it_behaves_like 'job is unchanged' + end + + context 'when created_at is before updated_at' do + let(:created_at) { 3.days.ago } + + it_behaves_like 'job is unchanged' + end + + context 'when created_at is outside lookback window' do + let(:created_at) { described_class::BUILD_LOOKBACK - 1.day } + + it_behaves_like 'job is unchanged' + end + end + end + + context 'when job is stuck' do + before do + allow_next_found_instance_of(Ci::Build) do |build| + allow(build).to receive(:stuck?).and_return(true) + end + end + + context 'when job was updated_at more than 1 hour ago' do + let(:updated_at) { 1.5.hours.ago } + + context 'when created_at is the same as updated_at' do + let(:created_at) { 1.5.hours.ago } + + it_behaves_like 'job is dropped' + end + + context 'when created_at is before updated_at' do + let(:created_at) { 3.days.ago } + + it_behaves_like 'job is dropped' + end + + context 'when created_at is outside lookback window' do + let(:created_at) { described_class::BUILD_LOOKBACK - 1.day } + + it_behaves_like 'job is unchanged' + end + end + + context 'when job was updated in less than 1 hour ago' do + let(:updated_at) { 30.minutes.ago } + + context 'when created_at is the same as updated_at' do + let(:created_at) { 30.minutes.ago } + + it_behaves_like 'job is unchanged' + end + + context 'when created_at is before updated_at' do + let(:created_at) { 2.days.ago } + + it_behaves_like 'job is unchanged' + end + + context 'when created_at is outside lookback window' do + let(:created_at) { described_class::BUILD_LOOKBACK - 1.day } + + it_behaves_like 'job is unchanged' + end + end + end + end + + context 'when job is running' do + let(:status) { 'running' } + + context 'when job was updated_at more than an hour ago' do + let(:updated_at) { 2.hours.ago } + + it_behaves_like 'job is dropped' + end + + context 'when job was updated in less than 1 hour ago' do + let(:updated_at) { 30.minutes.ago } + + it_behaves_like 'job is unchanged' + end + end + + %w(success skipped failed canceled).each do |status| + context "when job is #{status}" do + let(:status) { status } + let(:updated_at) { 2.days.ago } + + context 'when created_at is the same as updated_at' do + let(:created_at) { 2.days.ago } + + it_behaves_like 'job is unchanged' + end + + context 'when created_at is before updated_at' do + let(:created_at) { 3.days.ago } + + it_behaves_like 'job is unchanged' + end + + context 'when created_at is outside lookback window' do + let(:created_at) { described_class::BUILD_LOOKBACK - 1.day } + + it_behaves_like 'job is unchanged' + end + end + end + + context 'for deleted project' do + let(:status) { 'running' } + let(:updated_at) { 2.days.ago } + + before do + job.project.update!(pending_delete: true) + end + + it_behaves_like 'job is dropped' + end + + describe 'drop stale scheduled builds' do + let(:status) { 'scheduled' } + let(:updated_at) { } + + context 'when scheduled at 2 hours ago but it is not executed yet' do + 
let!(:job) { create(:ci_build, :scheduled, scheduled_at: 2.hours.ago) } + + it 'drops the stale scheduled build' do + expect(Ci::Build.scheduled.count).to eq(1) + expect(job).to be_scheduled + + service.execute + job.reload + + expect(Ci::Build.scheduled.count).to eq(0) + expect(job).to be_failed + expect(job).to be_stale_schedule + end + end + + context 'when scheduled at 30 minutes ago but it is not executed yet' do + let!(:job) { create(:ci_build, :scheduled, scheduled_at: 30.minutes.ago) } + + it 'does not drop the stale scheduled build yet' do + expect(Ci::Build.scheduled.count).to eq(1) + expect(job).to be_scheduled + + service.execute + + expect(Ci::Build.scheduled.count).to eq(1) + expect(job).to be_scheduled + end + end + + context 'when there are no stale scheduled builds' do + it 'does not drop the stale scheduled build yet' do + expect { service.execute }.not_to raise_error + end + end + end +end diff --git a/spec/services/ci/update_pending_build_service_spec.rb b/spec/services/ci/update_pending_build_service_spec.rb new file mode 100644 index 00000000000..d842042de40 --- /dev/null +++ b/spec/services/ci/update_pending_build_service_spec.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe Ci::UpdatePendingBuildService do + describe '#execute' do + let_it_be(:group) { create(:group) } + let_it_be(:project) { create(:project, namespace: group) } + let_it_be(:pending_build_1) { create(:ci_pending_build, project: project, instance_runners_enabled: false) } + let_it_be(:pending_build_2) { create(:ci_pending_build, project: project, instance_runners_enabled: true) } + let_it_be(:update_params) { { instance_runners_enabled: true } } + + subject(:service) { described_class.new(model, update_params).execute } + + context 'validations' do + context 'when model is invalid' do + let(:model) { pending_build_1 } + + it 'raises an error' do + expect { service }.to raise_error(described_class::InvalidModelError) + end + end + + context 'when params is invalid' do + let(:model) { group } + let(:update_params) { { minutes_exceeded: true } } + + it 'raises an error' do + expect { service }.to raise_error(described_class::InvalidParamsError) + end + end + end + + context 'when model is a group with pending builds' do + let(:model) { group } + + it 'updates all pending builds', :aggregate_failures do + service + + expect(pending_build_1.reload.instance_runners_enabled).to be_truthy + expect(pending_build_2.reload.instance_runners_enabled).to be_truthy + end + + context 'when ci_pending_builds_maintain_shared_runners_data is disabled' do + before do + stub_feature_flags(ci_pending_builds_maintain_shared_runners_data: false) + end + + it 'does not update all pending builds', :aggregate_failures do + service + + expect(pending_build_1.reload.instance_runners_enabled).to be_falsey + expect(pending_build_2.reload.instance_runners_enabled).to be_truthy + end + end + end + + context 'when model is a project with pending builds' do + let(:model) { project } + + it 'updates all pending builds', :aggregate_failures do + service + + expect(pending_build_1.reload.instance_runners_enabled).to be_truthy + expect(pending_build_2.reload.instance_runners_enabled).to be_truthy + end + + context 'when ci_pending_builds_maintain_shared_runners_data is disabled' do + before do + stub_feature_flags(ci_pending_builds_maintain_shared_runners_data: false) + end + + it 'does not update all pending builds', :aggregate_failures do + service + + 
+          expect(pending_build_1.reload.instance_runners_enabled).to be_falsey
+          expect(pending_build_2.reload.instance_runners_enabled).to be_truthy
+        end
+      end
+    end
+  end
+end