commit    aee0a117a889461ce8ced6fcf73207fe017f1d99 (patch)
tree      891d9ef189227a8445d83f35c1b0fc99573f4380 /spec/services/ci
parent    8d46af3258650d305f53b819eabf7ab18d22f59e (diff)
author    GitLab Bot <gitlab-bot@gitlab.com>  2021-12-20 13:37:47 +0000
committer GitLab Bot <gitlab-bot@gitlab.com>  2021-12-20 13:37:47 +0000

    Add latest changes from gitlab-org/gitlab@14-6-stable-ee (tag: v14.6.0-rc42)
Diffstat (limited to 'spec/services/ci')

 spec/services/ci/create_pipeline_service/logger_spec.rb                           |  139
 spec/services/ci/create_pipeline_service/tags_spec.rb                             |  173
 spec/services/ci/create_pipeline_service_spec.rb                                  |  110
 spec/services/ci/expire_pipeline_cache_service_spec.rb                            |   54
 spec/services/ci/generate_terraform_reports_service_spec.rb                       |    2
 spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb                |   40
 spec/services/ci/job_artifacts/destroy_batch_service_spec.rb                      |   85
 spec/services/ci/parse_dotenv_artifact_service_spec.rb                            |   40
 spec/services/ci/pipeline_processing/atomic_processing_service_spec.rb            | 1102
 spec/services/ci/pipeline_processing/shared_processing_service.rb                 | 1040
 spec/services/ci/pipeline_processing/shared_processing_service_tests_with_yaml.rb |   61
 spec/services/ci/play_build_service_spec.rb                                       |   17
 spec/services/ci/process_sync_events_service_spec.rb                              |  129
 spec/services/ci/register_job_service_spec.rb                                     |  106
 spec/services/ci/retry_build_service_spec.rb                                      |   27
 spec/services/ci/stuck_builds/drop_pending_service_spec.rb                        |   46
 spec/services/ci/update_pending_build_service_spec.rb                             |    8
 17 files changed, 1872 insertions(+), 1307 deletions(-)
diff --git a/spec/services/ci/create_pipeline_service/logger_spec.rb b/spec/services/ci/create_pipeline_service/logger_spec.rb
new file mode 100644
index 00000000000..dfe0859015d
--- /dev/null
+++ b/spec/services/ci/create_pipeline_service/logger_spec.rb
@@ -0,0 +1,139 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Ci::CreatePipelineService do
+ context 'pipeline logger' do
+ let_it_be(:project) { create(:project, :repository) }
+ let_it_be(:user) { project.owner }
+
+ let(:ref) { 'refs/heads/master' }
+ let(:service) { described_class.new(project, user, { ref: ref }) }
+ let(:pipeline) { service.execute(:push).payload }
+ let(:file_location) { 'spec/fixtures/gitlab/ci/external_files/.gitlab-ci-template-1.yml' }
+
+ before do
+ stub_ci_pipeline_yaml_file(gitlab_ci_yaml)
+ end
+
+ let(:counters) do
+ {
+ 'count' => a_kind_of(Numeric),
+ 'avg' => a_kind_of(Numeric),
+ 'max' => a_kind_of(Numeric),
+ 'min' => a_kind_of(Numeric)
+ }
+ end
+
+ let(:loggable_data) do
+ {
+ 'pipeline_creation_caller' => 'Ci::CreatePipelineService',
+ 'pipeline_source' => 'push',
+ 'pipeline_id' => a_kind_of(Numeric),
+ 'pipeline_persisted' => true,
+ 'project_id' => project.id,
+ 'pipeline_creation_service_duration_s' => a_kind_of(Numeric),
+ 'pipeline_creation_duration_s' => counters,
+ 'pipeline_size_count' => counters,
+ 'pipeline_step_gitlab_ci_pipeline_chain_seed_duration_s' => counters
+ }
+ end
+
+ context 'when the duration is under the threshold' do
+ it 'does not create a log entry but collects the data' do
+ expect(Gitlab::AppJsonLogger).not_to receive(:info)
+ expect(pipeline).to be_created_successfully
+
+ expect(service.logger.observations_hash)
+ .to match(
+ a_hash_including(
+ 'pipeline_creation_duration_s' => counters,
+ 'pipeline_size_count' => counters,
+ 'pipeline_step_gitlab_ci_pipeline_chain_seed_duration_s' => counters
+ )
+ )
+ end
+ end
+
+ context 'when the duration exceeds the threshold' do
+ let(:timer) do
+ proc do
+ @timer = @timer.to_i + 30
+ end
+ end
+
+ before do
+ allow(Gitlab::Ci::Pipeline::Logger)
+ .to receive(:current_monotonic_time) { timer.call }
+ end
+
+ it 'creates a log entry' do
+ expect(Gitlab::AppJsonLogger)
+ .to receive(:info)
+ .with(a_hash_including(loggable_data))
+ .and_call_original
+
+ expect(pipeline).to be_created_successfully
+ end
+
+ context 'when the pipeline is not persisted' do
+ let(:loggable_data) do
+ {
+ 'pipeline_creation_caller' => 'Ci::CreatePipelineService',
+ 'pipeline_source' => 'push',
+ 'pipeline_id' => nil,
+ 'pipeline_persisted' => false,
+ 'project_id' => project.id,
+ 'pipeline_creation_service_duration_s' => a_kind_of(Numeric),
+ 'pipeline_step_gitlab_ci_pipeline_chain_seed_duration_s' => counters
+ }
+ end
+
+ before do
+ allow_next_instance_of(Ci::Pipeline) do |pipeline|
+ expect(pipeline).to receive(:save!).and_raise(RuntimeError)
+ end
+ end
+
+ it 'creates a log entry' do
+ expect(Gitlab::AppJsonLogger)
+ .to receive(:info)
+ .with(a_hash_including(loggable_data))
+ .and_call_original
+
+ expect { pipeline }.to raise_error(RuntimeError)
+ end
+ end
+
+ context 'when the feature flag is disabled' do
+ before do
+ stub_feature_flags(ci_pipeline_creation_logger: false)
+ end
+
+ it 'does not create a log entry' do
+ expect(Gitlab::AppJsonLogger).not_to receive(:info)
+
+ expect(pipeline).to be_created_successfully
+ expect(service.logger.observations_hash).to eq({})
+ end
+ end
+ end
+
+ context 'when the size exceeds the threshold' do
+ before do
+ allow_next_instance_of(Ci::Pipeline) do |pipeline|
+ allow(pipeline).to receive(:total_size) { 5000 }
+ end
+ end
+
+ it 'creates a log entry' do
+ expect(Gitlab::AppJsonLogger)
+ .to receive(:info)
+ .with(a_hash_including(loggable_data))
+ .and_call_original
+
+ expect(pipeline).to be_created_successfully
+ end
+ end
+ end
+end
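
The new logger_spec.rb above drives the duration logger by stubbing Gitlab::Ci::Pipeline::Logger.current_monotonic_time with a proc that advances 30 seconds per call, so every observed step appears slow. A minimal sketch of the observation pattern being tested, with illustrative names (PipelineLogger, THRESHOLD_SECONDS, and #instrument are assumptions, not GitLab's actual API):

# Minimal sketch: time named steps with a monotonic clock, aggregate the
# samples into count/avg/max/min, and emit one log entry past a threshold.
# All names here are illustrative, not GitLab's implementation.
class PipelineLogger
  THRESHOLD_SECONDS = 60 # assumed threshold for this sketch

  def self.current_monotonic_time
    Process.clock_gettime(Process::CLOCK_MONOTONIC)
  end

  def initialize
    @observations = Hash.new { |hash, key| hash[key] = [] }
  end

  # Times the block and records the sample under "#{name}_duration_s".
  def instrument(name)
    start = self.class.current_monotonic_time
    yield
  ensure
    @observations["#{name}_duration_s"] << self.class.current_monotonic_time - start
  end

  # The count/avg/max/min shape the `counters` matcher above asserts on.
  def observations_hash
    @observations.transform_values do |samples|
      {
        'count' => samples.size,
        'avg' => samples.sum / samples.size,
        'max' => samples.max,
        'min' => samples.min
      }
    end
  end

  # Emits a single aggregate entry only when some step crossed the threshold.
  def commit(logger:)
    slow = observations_hash.values.any? { |c| c['max'] >= THRESHOLD_SECONDS }
    logger.info(observations_hash) if slow
  end
end
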
diff --git a/spec/services/ci/create_pipeline_service/tags_spec.rb b/spec/services/ci/create_pipeline_service/tags_spec.rb
index 335d35010c8..cbbeb870c5f 100644
--- a/spec/services/ci/create_pipeline_service/tags_spec.rb
+++ b/spec/services/ci/create_pipeline_service/tags_spec.rb
@@ -7,16 +7,15 @@ RSpec.describe Ci::CreatePipelineService do
let_it_be(:user) { project.owner }
let(:ref) { 'refs/heads/master' }
- let(:source) { :push }
let(:service) { described_class.new(project, user, { ref: ref }) }
- let(:pipeline) { service.execute(source).payload }
+ let(:pipeline) { create_pipeline }
before do
- stub_ci_pipeline_yaml_file(config)
+ stub_yaml_config(config)
end
context 'with valid config' do
- let(:config) { YAML.dump({ test: { script: 'ls', tags: %w[tag1 tag2] } }) }
+ let(:config) { { test: { script: 'ls', tags: %w[tag1 tag2] } } }
it 'creates a pipeline', :aggregate_failures do
expect(pipeline).to be_created_successfully
@@ -25,8 +24,8 @@ RSpec.describe Ci::CreatePipelineService do
end
context 'with too many tags' do
- let(:tags) { Array.new(50) {|i| "tag-#{i}" } }
- let(:config) { YAML.dump({ test: { script: 'ls', tags: tags } }) }
+ let(:tags) { build_tag_list(label: 'custom', size: 50) }
+ let(:config) { { test: { script: 'ls', tags: tags } } }
it 'creates a pipeline without builds', :aggregate_failures do
expect(pipeline).not_to be_created_successfully
@@ -34,5 +33,167 @@ RSpec.describe Ci::CreatePipelineService do
expect(pipeline.yaml_errors).to eq("jobs:test:tags config must be less than the limit of #{Gitlab::Ci::Config::Entry::Tags::TAGS_LIMIT} tags")
end
end
+
+ context 'tags persistence' do
+ let(:config) do
+ {
+ build: {
+ script: 'ls',
+ stage: 'build',
+ tags: build_tag_list(label: 'build')
+ },
+ test: {
+ script: 'ls',
+ stage: 'test',
+ tags: build_tag_list(label: 'test')
+ }
+ }
+ end
+
+ let(:config_without_tags) do
+ config.transform_values { |job| job.except(:tags) }
+ end
+
+ context 'with multiple tags' do
+ context 'when the tags do not exist' do
+ it 'does not execute N+1 queries' do
+ stub_yaml_config(config_without_tags)
+
+ # warm up the cached objects so we get a more accurate count
+ create_pipeline
+
+ control = ActiveRecord::QueryRecorder.new(skip_cached: false) do
+ create_pipeline
+ end
+
+ stub_yaml_config(config)
+
+ # 2 select tags.*
+ # 1 insert tags
+ # 1 insert taggings
+ tags_queries_size = 4
+
+ expect { pipeline }
+ .not_to exceed_all_query_limit(control)
+ .with_threshold(tags_queries_size)
+
+ expect(pipeline).to be_created_successfully
+ end
+ end
+
+ context 'when the feature flag is disabled' do
+ before do
+ stub_feature_flags(ci_bulk_insert_tags: false)
+ end
+
+ it 'executes N+1 queries' do
+ stub_yaml_config(config_without_tags)
+
+ # warm up the cached objects so we get a more accurate count
+ create_pipeline
+
+ control = ActiveRecord::QueryRecorder.new(skip_cached: false) do
+ create_pipeline
+ end
+
+ stub_yaml_config(config)
+
+ expect { pipeline }
+ .to exceed_all_query_limit(control)
+ .with_threshold(4)
+
+ expect(pipeline).to be_created_successfully
+ end
+ end
+
+ context 'when tags are already persisted' do
+ it 'does not execute N+1 queries' do
+ # warm up the cached objects so we get a more accurate count
+ # and insert the tags
+ create_pipeline
+
+ control = ActiveRecord::QueryRecorder.new(skip_cached: false) do
+ create_pipeline
+ end
+
+ # 1 select tags.*
+ # 1 insert taggings
+ tags_queries_size = 2
+
+ expect { pipeline }
+ .not_to exceed_all_query_limit(control)
+ .with_threshold(tags_queries_size)
+
+ expect(pipeline).to be_created_successfully
+ end
+ end
+ end
+
+ context 'with bridge jobs' do
+ let(:config) do
+ {
+ test_1: {
+ script: 'ls',
+ stage: 'test',
+ tags: build_tag_list(label: 'test_1')
+ },
+ test_2: {
+ script: 'ls',
+ stage: 'test',
+ tags: build_tag_list(label: '$CI_JOB_NAME')
+ },
+ test_3: {
+ script: 'ls',
+ stage: 'test',
+ tags: build_tag_list(label: 'test_1') + build_tag_list(label: 'test_2')
+ },
+ test_4: {
+ script: 'ls',
+ stage: 'test'
+ },
+ deploy: {
+ stage: 'deploy',
+ trigger: 'my/project'
+ }
+ }
+ end
+
+ it do
+ expect(pipeline).to be_created_successfully
+ expect(pipeline.bridges.size).to eq(1)
+ expect(pipeline.builds.size).to eq(4)
+
+ expect(tags_for('test_1'))
+ .to have_attributes(count: 5)
+ .and all(match(/test_1-tag-\d+/))
+
+ expect(tags_for('test_2'))
+ .to have_attributes(count: 5)
+ .and all(match(/test_2-tag-\d+/))
+
+ expect(tags_for('test_3'))
+ .to have_attributes(count: 10)
+ .and all(match(/test_[12]-tag-\d+/))
+
+ expect(tags_for('test_4')).to be_empty
+ end
+ end
+ end
+ end
+
+ def tags_for(build_name)
+ pipeline.builds.find_by_name(build_name).tag_list
+ end
+
+ def stub_yaml_config(config)
+ stub_ci_pipeline_yaml_file(YAML.dump(config))
+ end
+
+ def create_pipeline
+ service.execute(:push).payload
+ end
+
+ def build_tag_list(label:, size: 5)
+ Array.new(size) { |index| "#{label}-tag-#{index}" }
end
end
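
The N+1 assertions in tags_spec.rb above follow a fixed recipe: warm caches with one throwaway run, record a control query count, switch to the heavier config, and assert that the count grows by at most a fixed delta rather than once per job. A hedged sketch of the same recipe with a stand-in recorder (ActiveRecord::QueryRecorder and exceed_all_query_limit are GitLab test helpers; record_queries below is an assumption built on ActiveSupport notifications):

# Stand-in query recorder: count SQL notifications emitted while the
# block runs. create_pipeline is the spec's own helper.
require 'active_support/notifications'

def record_queries
  count = 0
  subscriber = ActiveSupport::Notifications.subscribe('sql.active_record') { count += 1 }
  yield
  count
ensure
  ActiveSupport::Notifications.unsubscribe(subscriber)
end

# 1. Warm up cached objects so the control run is representative.
create_pipeline
# 2. Record the control query count for the config without tags.
control = record_queries { create_pipeline }
# 3. Switch to the tagged config and assert a bounded delta: a fixed
#    number of extra tag queries, not one per job.
tags_queries_size = 4
actual = record_queries { create_pipeline }
raise 'possible N+1' if actual > control + tags_queries_size
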
diff --git a/spec/services/ci/create_pipeline_service_spec.rb b/spec/services/ci/create_pipeline_service_spec.rb
index c78e19ea62d..ef879d536c3 100644
--- a/spec/services/ci/create_pipeline_service_spec.rb
+++ b/spec/services/ci/create_pipeline_service_spec.rb
@@ -46,6 +46,47 @@ RSpec.describe Ci::CreatePipelineService do
end
# rubocop:enable Metrics/ParameterLists
+ context 'performance' do
+ it_behaves_like 'pipelines are created without N+1 SQL queries' do
+ let(:config1) do
+ <<~YAML
+ job1:
+ stage: build
+ script: exit 0
+
+ job2:
+ stage: test
+ script: exit 0
+ YAML
+ end
+
+ let(:config2) do
+ <<~YAML
+ job1:
+ stage: build
+ script: exit 0
+
+ job2:
+ stage: test
+ script: exit 0
+
+ job3:
+ stage: deploy
+ script: exit 0
+ YAML
+ end
+
+ let(:accepted_n_plus_ones) do
+ 1 + # SELECT "ci_instance_variables"
+ 1 + # INSERT INTO "ci_stages"
+ 1 + # SELECT "ci_builds".* FROM "ci_builds"
+ 1 + # INSERT INTO "ci_builds"
+ 1 + # INSERT INTO "ci_builds_metadata"
+ 1 # SELECT "taggings".* FROM "taggings"
+ end
+ end
+ end
+
context 'valid params' do
let(:pipeline) { execute_service.payload }
@@ -1951,6 +1992,75 @@ RSpec.describe Ci::CreatePipelineService do
let(:rules_job) { find_job('rules-job') }
let(:delayed_job) { find_job('delayed-job') }
+ context 'with when:manual' do
+ let(:config) do
+ <<-EOY
+ job-with-rules:
+ script: 'echo hey'
+ rules:
+ - if: $CI_COMMIT_REF_NAME =~ /master/
+
+ job-when-with-rules:
+ script: 'echo hey'
+ when: manual
+ rules:
+ - if: $CI_COMMIT_REF_NAME =~ /master/
+
+ job-when-with-rules-when:
+ script: 'echo hey'
+ when: manual
+ rules:
+ - if: $CI_COMMIT_REF_NAME =~ /master/
+ when: on_success
+
+ job-with-rules-when:
+ script: 'echo hey'
+ rules:
+ - if: $CI_COMMIT_REF_NAME =~ /master/
+ when: manual
+
+ job-without-rules:
+ script: 'echo this is a job with NO rules'
+ EOY
+ end
+
+ let(:job_with_rules) { find_job('job-with-rules') }
+ let(:job_when_with_rules) { find_job('job-when-with-rules') }
+ let(:job_when_with_rules_when) { find_job('job-when-with-rules-when') }
+ let(:job_with_rules_when) { find_job('job-with-rules-when') }
+ let(:job_without_rules) { find_job('job-without-rules') }
+
+ context 'when matching the rules' do
+ let(:ref_name) { 'refs/heads/master' }
+
+ it 'persists all jobs with the expected when values' do
+ expect(job_with_rules).to be_persisted
+ expect(job_when_with_rules).to be_persisted
+ expect(job_when_with_rules_when).to be_persisted
+ expect(job_with_rules_when).to be_persisted
+ expect(job_without_rules).to be_persisted
+
+ expect(job_with_rules.when).to eq('on_success')
+ expect(job_when_with_rules.when).to eq('manual')
+ expect(job_when_with_rules_when.when).to eq('on_success')
+ expect(job_with_rules_when.when).to eq('manual')
+ expect(job_without_rules.when).to eq('on_success')
+ end
+ end
+
+ context 'when there is no match to the rule' do
+ let(:ref_name) { 'refs/heads/wip' }
+
+ it 'does not persist the jobs with rules' do
+ expect(job_with_rules).to be_nil
+ expect(job_when_with_rules).to be_nil
+ expect(job_when_with_rules_when).to be_nil
+ expect(job_with_rules_when).to be_nil
+ expect(job_without_rules).to be_persisted
+ end
+ end
+ end
+
shared_examples 'rules jobs are excluded' do
it 'only persists the job without rules' do
expect(pipeline).to be_persisted
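
The when:manual contexts added above pin down a precedence rule: a when: set on a matching rule overrides the job-level when:, which in turn overrides the on_success default. A tiny illustrative reduction of that precedence (a sketch, not GitLab's implementation):

# Illustrative precedence for a job's effective `when`:
# rule-level `when` beats job-level `when`, which beats the default.
DEFAULT_WHEN = 'on_success'

def effective_when(job_when: nil, matched_rule_when: nil)
  matched_rule_when || job_when || DEFAULT_WHEN
end

effective_when                                                      # job-without-rules        => "on_success"
effective_when(job_when: 'manual')                                  # job-when-with-rules      => "manual"
effective_when(job_when: 'manual', matched_rule_when: 'on_success') # job-when-with-rules-when => "on_success"
effective_when(matched_rule_when: 'manual')                         # job-with-rules-when      => "manual"
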
diff --git a/spec/services/ci/expire_pipeline_cache_service_spec.rb b/spec/services/ci/expire_pipeline_cache_service_spec.rb
index 613bbe45e68..8cfe756faf3 100644
--- a/spec/services/ci/expire_pipeline_cache_service_spec.rb
+++ b/spec/services/ci/expire_pipeline_cache_service_spec.rb
@@ -16,14 +16,16 @@ RSpec.describe Ci::ExpirePipelineCacheService do
pipeline_path = "/#{project.full_path}/-/pipelines/#{pipeline.id}.json"
graphql_pipeline_path = "/api/graphql:pipelines/id/#{pipeline.id}"
graphql_pipeline_sha_path = "/api/graphql:pipelines/sha/#{pipeline.sha}"
+ graphql_project_on_demand_scan_counts_path = "/api/graphql:on_demand_scan/counts/#{project.full_path}"
- expect_next_instance_of(Gitlab::EtagCaching::Store) do |store|
- expect(store).to receive(:touch).with(pipelines_path)
- expect(store).to receive(:touch).with(new_mr_pipelines_path)
- expect(store).to receive(:touch).with(pipeline_path)
- expect(store).to receive(:touch).with(graphql_pipeline_path)
- expect(store).to receive(:touch).with(graphql_pipeline_sha_path)
- end
+ expect_touched_etag_caching_paths(
+ pipelines_path,
+ new_mr_pipelines_path,
+ pipeline_path,
+ graphql_pipeline_path,
+ graphql_pipeline_sha_path,
+ graphql_project_on_demand_scan_counts_path
+ )
subject.execute(pipeline)
end
@@ -35,9 +37,10 @@ RSpec.describe Ci::ExpirePipelineCacheService do
merge_request_pipelines_path = "/#{project.full_path}/-/merge_requests/#{merge_request.iid}/pipelines.json"
merge_request_widget_path = "/#{project.full_path}/-/merge_requests/#{merge_request.iid}/cached_widget.json"
- allow_any_instance_of(Gitlab::EtagCaching::Store).to receive(:touch)
- expect_any_instance_of(Gitlab::EtagCaching::Store).to receive(:touch).with(merge_request_pipelines_path)
- expect_any_instance_of(Gitlab::EtagCaching::Store).to receive(:touch).with(merge_request_widget_path)
+ expect_touched_etag_caching_paths(
+ merge_request_pipelines_path,
+ merge_request_widget_path
+ )
subject.execute(merge_request.all_pipelines.last)
end
@@ -76,10 +79,7 @@ RSpec.describe Ci::ExpirePipelineCacheService do
it 'updates the cache of dependent pipeline' do
dependent_pipeline_path = "/#{source.source_project.full_path}/-/pipelines/#{source.source_pipeline.id}.json"
- expect_next_instance_of(Gitlab::EtagCaching::Store) do |store|
- allow(store).to receive(:touch)
- expect(store).to receive(:touch).with(dependent_pipeline_path)
- end
+ expect_touched_etag_caching_paths(dependent_pipeline_path)
subject.execute(pipeline)
end
@@ -92,13 +92,31 @@ RSpec.describe Ci::ExpirePipelineCacheService do
it 'updates the cache of dependent pipeline' do
dependent_pipeline_path = "/#{source.project.full_path}/-/pipelines/#{source.pipeline.id}.json"
- expect_next_instance_of(Gitlab::EtagCaching::Store) do |store|
- allow(store).to receive(:touch)
- expect(store).to receive(:touch).with(dependent_pipeline_path)
- end
+ expect_touched_etag_caching_paths(dependent_pipeline_path)
subject.execute(pipeline)
end
end
+
+ it 'does not do N+1 queries' do
+ subject.execute(pipeline)
+
+ control = ActiveRecord::QueryRecorder.new { subject.execute(pipeline) }
+
+ create(:ci_sources_pipeline, pipeline: pipeline)
+ create(:ci_sources_pipeline, source_job: create(:ci_build, pipeline: pipeline))
+
+ expect { subject.execute(pipeline) }.not_to exceed_query_limit(control.count)
+ end
+ end
+
+ def expect_touched_etag_caching_paths(*paths)
+ expect_next_instance_of(Gitlab::EtagCaching::Store) do |store|
+ expect(store).to receive(:touch).and_wrap_original do |m, *args|
+ expect(args).to include(*paths)
+
+ m.call(*args)
+ end
+ end
end
end
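
The new expect_touched_etag_caching_paths helper above leans on RSpec's and_wrap_original, which replaces a method while keeping a handle on the real implementation, so the spec can assert on arguments without losing behaviour. A standalone sketch of the same pattern (the Store class here is illustrative):

# Standalone sketch of and_wrap_original: intercept the call, assert on
# the arguments, then delegate to the real method.
RSpec.describe 'and_wrap_original' do
  it 'asserts on arguments without losing the real behaviour' do
    store = Class.new do
      def touch(*keys)
        keys.size
      end
    end.new

    expect(store).to receive(:touch).and_wrap_original do |original, *args|
      expect(args).to include('a', 'b') # assert on the intercepted arguments
      original.call(*args)              # call through to the real #touch
    end

    expect(store.touch('a', 'b', 'c')).to eq(3)
  end
end
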
diff --git a/spec/services/ci/generate_terraform_reports_service_spec.rb b/spec/services/ci/generate_terraform_reports_service_spec.rb
index c9ac74e050c..c32e8bcaeb8 100644
--- a/spec/services/ci/generate_terraform_reports_service_spec.rb
+++ b/spec/services/ci/generate_terraform_reports_service_spec.rb
@@ -23,7 +23,7 @@ RSpec.describe Ci::GenerateTerraformReportsService do
'create' => 0,
'delete' => 0,
'update' => 1,
- 'job_name' => build.options.dig(:artifacts, :name).to_s
+ 'job_name' => build.name
))
),
key: an_instance_of(Array)
diff --git a/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb b/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb
index 6761f052e18..e71f1a4266a 100644
--- a/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb
+++ b/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb
@@ -53,6 +53,46 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s
log = ActiveRecord::QueryRecorder.new { subject }
expect(log.count).to be_within(1).of(8)
end
+
+ context 'with several locked-unknown artifact records' do
+ before do
+ stub_const("#{described_class}::LOOP_LIMIT", 10)
+ stub_const("#{described_class}::BATCH_SIZE", 2)
+ end
+
+ let!(:lockable_artifact_records) do
+ [
+ create(:ci_job_artifact, :metadata, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: locked_job),
+ create(:ci_job_artifact, :junit, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: locked_job),
+ create(:ci_job_artifact, :sast, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: locked_job),
+ create(:ci_job_artifact, :cobertura, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: locked_job),
+ create(:ci_job_artifact, :trace, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: locked_job)
+ ]
+ end
+
+ let!(:unlockable_artifact_records) do
+ [
+ create(:ci_job_artifact, :metadata, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: job),
+ create(:ci_job_artifact, :junit, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: job),
+ create(:ci_job_artifact, :sast, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: job),
+ create(:ci_job_artifact, :cobertura, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: job),
+ create(:ci_job_artifact, :trace, :expired, locked: ::Ci::JobArtifact.lockeds[:unknown], job: job),
+ artifact
+ ]
+ end
+
+ it 'updates the locked status of job artifacts from artifacts-locked pipelines' do
+ subject
+
+ expect(lockable_artifact_records).to be_all(&:persisted?)
+ expect(lockable_artifact_records).to be_all { |artifact| artifact.reload.artifact_artifacts_locked? }
+ end
+
+ it 'unlocks and then destroys job artifacts from artifacts-unlocked pipelines' do
+ expect { subject }.to change { Ci::JobArtifact.count }.by(-6)
+ expect(Ci::JobArtifact.where(id: unlockable_artifact_records.map(&:id))).to be_empty
+ end
+ end
end
end
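
Stubbing LOOP_LIMIT and BATCH_SIZE down to tiny values forces the service through several batches within one example, which is what makes the locked/unlocked split above observable. A hedged sketch of the loop shape such constants usually bound (an assumption for illustration, not the real DestroyAllExpiredService; destroy_batch is a hypothetical helper):

# A loop bounded by LOOP_LIMIT working through BATCH_SIZE-d slices;
# stubbing the constants small forces many batches per test run.
LOOP_LIMIT = 10
BATCH_SIZE = 2

def destroy_all_expired(expired_ids)
  LOOP_LIMIT.times do
    batch = expired_ids.shift(BATCH_SIZE)
    break if batch.empty?

    destroy_batch(batch) # hypothetical; the spec's service does the real work
  end
end
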
diff --git a/spec/services/ci/job_artifacts/destroy_batch_service_spec.rb b/spec/services/ci/job_artifacts/destroy_batch_service_spec.rb
index 1cc856734fc..0e7230c042e 100644
--- a/spec/services/ci/job_artifacts/destroy_batch_service_spec.rb
+++ b/spec/services/ci/job_artifacts/destroy_batch_service_spec.rb
@@ -3,59 +3,74 @@
require 'spec_helper'
RSpec.describe Ci::JobArtifacts::DestroyBatchService do
- let(:artifacts) { Ci::JobArtifact.all }
+ let(:artifacts) { Ci::JobArtifact.where(id: [artifact_with_file.id, artifact_without_file.id]) }
let(:service) { described_class.new(artifacts, pick_up_at: Time.current) }
+ let_it_be(:artifact_with_file, refind: true) do
+ create(:ci_job_artifact, :zip)
+ end
+
+ let_it_be(:artifact_without_file, refind: true) do
+ create(:ci_job_artifact)
+ end
+
+ let_it_be(:undeleted_artifact, refind: true) do
+ create(:ci_job_artifact)
+ end
+
describe '.execute' do
subject(:execute) { service.execute }
- let_it_be(:artifact, refind: true) do
- create(:ci_job_artifact)
+ it 'creates a deleted object for artifact with attached file' do
+ expect { subject }.to change { Ci::DeletedObject.count }.by(1)
end
- context 'when the artifact has a file attached to it' do
- before do
- artifact.file = fixture_file_upload(Rails.root.join('spec/fixtures/ci_build_artifacts.zip'), 'application/zip')
- artifact.save!
- end
+ it 'does not remove the attached file' do
+ expect { execute }.not_to change { artifact_with_file.file.exists? }
+ end
- it 'creates a deleted object' do
- expect { subject }.to change { Ci::DeletedObject.count }.by(1)
- end
+ it 'deletes the artifact records' do
+ expect { subject }.to change { Ci::JobArtifact.count }.by(-2)
+ end
- it 'does not remove the files' do
- expect { execute }.not_to change { artifact.file.exists? }
+ it 'reports metrics for destroyed artifacts' do
+ expect_next_instance_of(Gitlab::Ci::Artifacts::Metrics) do |metrics|
+ expect(metrics).to receive(:increment_destroyed_artifacts_count).with(2).and_call_original
+ expect(metrics).to receive(:increment_destroyed_artifacts_bytes).with(107464).and_call_original
end
- it 'reports metrics for destroyed artifacts' do
- expect_next_instance_of(Gitlab::Ci::Artifacts::Metrics) do |metrics|
- expect(metrics).to receive(:increment_destroyed_artifacts_count).with(1).and_call_original
- expect(metrics).to receive(:increment_destroyed_artifacts_bytes).with(107464).and_call_original
- end
+ execute
+ end
+
+ context 'ProjectStatistics' do
+ it 'resets project statistics' do
+ expect(ProjectStatistics).to receive(:increment_statistic).once
+ .with(artifact_with_file.project, :build_artifacts_size, -artifact_with_file.file.size)
+ .and_call_original
+ expect(ProjectStatistics).to receive(:increment_statistic).once
+ .with(artifact_without_file.project, :build_artifacts_size, 0)
+ .and_call_original
execute
end
- context 'ProjectStatistics' do
- it 'resets project statistics' do
- expect(ProjectStatistics).to receive(:increment_statistic).once
- .with(artifact.project, :build_artifacts_size, -artifact.file.size)
- .and_call_original
+ context 'with update_stats: false' do
+ it 'does not update project statistics' do
+ expect(ProjectStatistics).not_to receive(:increment_statistic)
- execute
+ service.execute(update_stats: false)
end
- context 'with update_stats: false' do
- it 'does not update project statistics' do
- expect(ProjectStatistics).not_to receive(:increment_statistic)
-
- service.execute(update_stats: false)
- end
+ it 'returns size statistics' do
+ expected_updates = {
+ statistics_updates: {
+ artifact_with_file.project => -artifact_with_file.file.size,
+ artifact_without_file.project => 0
+ }
+ }
- it 'returns size statistics' do
- expect(service.execute(update_stats: false)).to match(
- a_hash_including(statistics_updates: { artifact.project => -artifact.file.size }))
- end
+ expect(service.execute(update_stats: false)).to match(
+ a_hash_including(expected_updates))
end
end
end
@@ -71,7 +86,7 @@ RSpec.describe Ci::JobArtifacts::DestroyBatchService do
it 'raises an exception and stop destroying' do
expect { execute }.to raise_error(ActiveRecord::RecordNotDestroyed)
- .and not_change { Ci::JobArtifact.count }.from(1)
+ .and not_change { Ci::JobArtifact.count }
end
end
end
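
The rewritten destroy_batch_service_spec.rb pins down a deferred-deletion pattern: destroying an artifact row records a Ci::DeletedObject pointing at the stored file (to be erased later by a cleanup worker) rather than deleting the file inline, which is why the spec sees the DeletedObject count rise while the attached file survives. A sketch of that shape (attribute names are assumptions for illustration):

# Deferred deletion: queue the file for later pickup, drop only the row.
ActiveRecord::Base.transaction do
  artifacts.each do |artifact|
    if artifact.file.present?
      Ci::DeletedObject.create!(
        file_path: artifact.file.path, # the file itself stays in storage
        pick_up_at: Time.current       # earliest time a worker may erase it
      )
    end

    artifact.destroy!                  # only the database row goes away now
  end
end
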
diff --git a/spec/services/ci/parse_dotenv_artifact_service_spec.rb b/spec/services/ci/parse_dotenv_artifact_service_spec.rb
index c4040a426f2..6bf22b7c8b2 100644
--- a/spec/services/ci/parse_dotenv_artifact_service_spec.rb
+++ b/spec/services/ci/parse_dotenv_artifact_service_spec.rb
@@ -23,6 +23,46 @@ RSpec.describe Ci::ParseDotenvArtifactService do
hash_including('key' => 'KEY2', 'value' => 'VAR2'))
end
+ context 'when dotenv variables are conflicting against manual variables' do
+ before do
+ create(:ci_job_variable, job: build, key: 'KEY1')
+ end
+
+ it 'returns an error message that there is a duplicate variable' do
+ subject
+
+ expect(subject[:status]).to eq(:error)
+ expect(subject[:message]).to include("Key (key, job_id)=(KEY1, #{build.id}) already exists.")
+ expect(subject[:http_status]).to eq(:bad_request)
+ end
+ end
+
+ context 'when dotenv variables have duplicate variables' do
+ let!(:artifact) { create(:ci_job_artifact, :dotenv, job: build) }
+ let(:blob) do
+ <<~EOS
+ KEY1=VAR1
+ KEY2=VAR2
+ KEY2=VAR3
+ KEY1=VAR4
+ EOS
+ end
+
+ before do
+ allow(artifact).to receive(:each_blob).and_yield(blob)
+ end
+
+ it 'uses the latest values' do
+ subject
+
+ expect(subject[:status]).to eq(:success)
+
+ expect(build.job_variables.as_json).to contain_exactly(
+ hash_including('key' => 'KEY1', 'value' => 'VAR4'),
+ hash_including('key' => 'KEY2', 'value' => 'VAR3'))
+ end
+ end
+
context 'when parse error happens' do
before do
allow(service).to receive(:scan_line!) { raise described_class::ParserError, 'Invalid Format' }
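
The duplicate-keys context above fixes last-value-wins semantics for dotenv reports. Accumulating into a Hash yields exactly that behaviour, since a later assignment to the same key overwrites the earlier one; a minimal sketch (the real service additionally validates formats and enforces size limits):

# Minimal last-value-wins dotenv parse: later KEY= lines overwrite
# earlier ones because Hash#[]= replaces existing keys.
def parse_dotenv(blob)
  blob.each_line.with_object({}) do |line, variables|
    key, value = line.chomp.split('=', 2)
    next if key.nil? || key.empty?

    variables[key] = value
  end
end

parse_dotenv("KEY1=VAR1\nKEY2=VAR2\nKEY2=VAR3\nKEY1=VAR4\n")
# => {"KEY1"=>"VAR4", "KEY2"=>"VAR3"}
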
diff --git a/spec/services/ci/pipeline_processing/atomic_processing_service_spec.rb b/spec/services/ci/pipeline_processing/atomic_processing_service_spec.rb
index a66d3898c5c..02f8f2dd99f 100644
--- a/spec/services/ci/pipeline_processing/atomic_processing_service_spec.rb
+++ b/spec/services/ci/pipeline_processing/atomic_processing_service_spec.rb
@@ -1,12 +1,1106 @@
# frozen_string_literal: true
require 'spec_helper'
-require_relative 'shared_processing_service'
-require_relative 'shared_processing_service_tests_with_yaml'
RSpec.describe Ci::PipelineProcessing::AtomicProcessingService do
- it_behaves_like 'Pipeline Processing Service'
- it_behaves_like 'Pipeline Processing Service Tests With Yaml'
+ describe 'Pipeline Processing Service Tests With Yaml' do
+ let_it_be(:project) { create(:project, :repository) }
+ let_it_be(:user) { project.owner }
+
+ where(:test_file_path) do
+ Dir.glob(Rails.root.join('spec/services/ci/pipeline_processing/test_cases/*.yml'))
+ end
+
+ with_them do
+ let(:test_file) { YAML.load_file(test_file_path) }
+ let(:pipeline) { Ci::CreatePipelineService.new(project, user, ref: 'master').execute(:pipeline).payload }
+
+ before do
+ stub_ci_pipeline_yaml_file(YAML.dump(test_file['config']))
+ end
+
+ it 'follows transitions' do
+ expect(pipeline).to be_persisted
+ Sidekiq::Worker.drain_all # ensure that all async jobs are executed
+ check_expectation(test_file.dig('init', 'expect'), "init")
+
+ test_file['transitions'].each_with_index do |transition, idx|
+ event_on_jobs(transition['event'], transition['jobs'])
+ Sidekiq::Worker.drain_all # ensure that all async jobs are executed
+ check_expectation(transition['expect'], "transition:#{idx}")
+ end
+ end
+
+ private
+
+ def check_expectation(expectation, message)
+ expect(current_state.deep_stringify_keys).to eq(expectation), message
+ end
+
+ def current_state
+ # reload pipeline and all relations
+ pipeline.reload
+
+ {
+ pipeline: pipeline.status,
+ stages: pipeline.stages.pluck(:name, :status).to_h,
+ jobs: pipeline.latest_statuses.pluck(:name, :status).to_h
+ }
+ end
+
+ def event_on_jobs(event, job_names)
+ statuses = pipeline.latest_statuses.by_name(job_names).to_a
+ expect(statuses.count).to eq(job_names.count) # ensure that we have the same counts
+
+ statuses.each do |status|
+ if event == 'play'
+ status.play(user)
+ else
+ status.public_send("#{event}!")
+ end
+ end
+ end
+ end
+ end
+
+ describe 'Pipeline Processing Service' do
+ let(:project) { create(:project, :repository) }
+ let(:user) { project.owner }
+
+ let(:pipeline) do
+ create(:ci_empty_pipeline, ref: 'master', project: project)
+ end
+
+ context 'when simple pipeline is defined' do
+ before do
+ create_build('linux', stage_idx: 0)
+ create_build('mac', stage_idx: 0)
+ create_build('rspec', stage_idx: 1)
+ create_build('rubocop', stage_idx: 1)
+ create_build('deploy', stage_idx: 2)
+ end
+
+ it 'processes a pipeline', :sidekiq_inline do
+ expect(process_pipeline).to be_truthy
+
+ succeed_pending
+
+ expect(builds.success.count).to eq(2)
+
+ succeed_pending
+
+ expect(builds.success.count).to eq(4)
+
+ succeed_pending
+
+ expect(builds.success.count).to eq(5)
+ end
+
+ it 'does not process pipeline if existing stage is running' do
+ expect(process_pipeline).to be_truthy
+ expect(builds.pending.count).to eq(2)
+
+ expect(process_pipeline).to be_falsey
+ expect(builds.pending.count).to eq(2)
+ end
+ end
+
+ context 'custom stage with first job allowed to fail' do
+ before do
+ create_build('clean_job', stage_idx: 0, allow_failure: true)
+ create_build('test_job', stage_idx: 1, allow_failure: true)
+ end
+
+ it 'automatically triggers a next stage when build finishes', :sidekiq_inline do
+ expect(process_pipeline).to be_truthy
+ expect(builds_statuses).to eq ['pending']
+
+ fail_running_or_pending
+
+ expect(builds_statuses).to eq %w(failed pending)
+
+ fail_running_or_pending
+
+ expect(pipeline.reload).to be_success
+ end
+ end
+
+ context 'when optional manual actions are defined', :sidekiq_inline do
+ before do
+ create_build('build', stage_idx: 0)
+ create_build('test', stage_idx: 1)
+ create_build('test_failure', stage_idx: 2, when: 'on_failure')
+ create_build('deploy', stage_idx: 3)
+ create_build('production', stage_idx: 3, when: 'manual', allow_failure: true)
+ create_build('cleanup', stage_idx: 4, when: 'always')
+ create_build('clear:cache', stage_idx: 4, when: 'manual', allow_failure: true)
+ end
+
+ context 'when builds are successful' do
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names).to eq ['build']
+ expect(builds_statuses).to eq ['pending']
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w(build test)
+ expect(builds_statuses).to eq %w(success pending)
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w(build test deploy production)
+ expect(builds_statuses).to eq %w(success success pending manual)
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w(build test deploy production cleanup clear:cache)
+ expect(builds_statuses).to eq %w(success success success manual pending manual)
+
+ succeed_running_or_pending
+
+ expect(builds_statuses).to eq %w(success success success manual success manual)
+ expect(pipeline.reload.status).to eq 'success'
+ end
+ end
+
+ context 'when test job fails' do
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names).to eq ['build']
+ expect(builds_statuses).to eq ['pending']
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w(build test)
+ expect(builds_statuses).to eq %w(success pending)
+
+ fail_running_or_pending
+
+ expect(builds_names).to eq %w(build test test_failure)
+ expect(builds_statuses).to eq %w(success failed pending)
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w(build test test_failure cleanup)
+ expect(builds_statuses).to eq %w(success failed success pending)
+
+ succeed_running_or_pending
+
+ expect(builds_statuses).to eq %w(success failed success success)
+ expect(pipeline.reload.status).to eq 'failed'
+ end
+ end
+
+ context 'when test and test_failure jobs fail' do
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names).to eq ['build']
+ expect(builds_statuses).to eq ['pending']
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w(build test)
+ expect(builds_statuses).to eq %w(success pending)
+
+ fail_running_or_pending
+
+ expect(builds_names).to eq %w(build test test_failure)
+ expect(builds_statuses).to eq %w(success failed pending)
+
+ fail_running_or_pending
+
+ expect(builds_names).to eq %w(build test test_failure cleanup)
+ expect(builds_statuses).to eq %w(success failed failed pending)
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w(build test test_failure cleanup)
+ expect(builds_statuses).to eq %w(success failed failed success)
+ expect(pipeline.reload.status).to eq('failed')
+ end
+ end
+
+ context 'when deploy job fails' do
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names).to eq ['build']
+ expect(builds_statuses).to eq ['pending']
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w(build test)
+ expect(builds_statuses).to eq %w(success pending)
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w(build test deploy production)
+ expect(builds_statuses).to eq %w(success success pending manual)
+
+ fail_running_or_pending
+
+ expect(builds_names).to eq %w(build test deploy production cleanup)
+ expect(builds_statuses).to eq %w(success success failed manual pending)
+
+ succeed_running_or_pending
+
+ expect(builds_statuses).to eq %w(success success failed manual success)
+ expect(pipeline.reload).to be_failed
+ end
+ end
+
+ context 'when build is canceled in the second stage' do
+ it 'does not schedule builds after build has been canceled' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names).to eq ['build']
+ expect(builds_statuses).to eq ['pending']
+
+ succeed_running_or_pending
+
+ expect(builds.running_or_pending).not_to be_empty
+ expect(builds_names).to eq %w(build test)
+ expect(builds_statuses).to eq %w(success pending)
+
+ cancel_running_or_pending
+
+ expect(builds.running_or_pending).to be_empty
+ expect(builds_names).to eq %w[build test]
+ expect(builds_statuses).to eq %w[success canceled]
+ expect(pipeline.reload).to be_canceled
+ end
+ end
+
+ context 'when listing optional manual actions' do
+ it 'returns only for skipped builds' do
+ # currently all builds are created
+ expect(process_pipeline).to be_truthy
+ expect(manual_actions).to be_empty
+
+ # succeed stage build
+ succeed_running_or_pending
+
+ expect(manual_actions).to be_empty
+
+ # succeed stage test
+ succeed_running_or_pending
+
+ expect(manual_actions).to be_one # production
+
+ # succeed stage deploy
+ succeed_running_or_pending
+
+ expect(manual_actions).to be_many # production and clear cache
+ end
+ end
+ end
+
+ context 'when delayed jobs are defined', :sidekiq_inline do
+ context 'when the scenario is a timed incremental rollout' do
+ before do
+ create_build('build', stage_idx: 0)
+ create_build('rollout10%', **delayed_options, stage_idx: 1)
+ create_build('rollout100%', **delayed_options, stage_idx: 2)
+ create_build('cleanup', stage_idx: 3)
+
+ allow(Ci::BuildScheduleWorker).to receive(:perform_at)
+ end
+
+ context 'when builds are successful' do
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
+
+ succeed_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
+
+ travel_to 2.minutes.from_now do
+ enqueue_scheduled('rollout10%')
+ end
+ succeed_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
+
+ travel_to 2.minutes.from_now do
+ enqueue_scheduled('rollout100%')
+ end
+ succeed_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'pending' })
+
+ succeed_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'success' })
+ expect(pipeline.reload.status).to eq 'success'
+ end
+ end
+
+ context 'when build job fails' do
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
+
+ fail_running_or_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'failed' })
+ expect(pipeline.reload.status).to eq 'failed'
+ end
+ end
+
+ context 'when rollout 10% is unscheduled' do
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
+
+ succeed_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
+
+ unschedule
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'manual' })
+ expect(pipeline.reload.status).to eq 'manual'
+ end
+
+ context 'when user plays rollout 10%' do
+ it 'schedules rollout100%' do
+ process_pipeline
+ succeed_pending
+ unschedule
+ play_manual_action('rollout10%')
+ succeed_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
+ expect(pipeline.reload.status).to eq 'scheduled'
+ end
+ end
+ end
+
+ context 'when rollout 10% fails' do
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
+
+ succeed_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
+
+ travel_to 2.minutes.from_now do
+ enqueue_scheduled('rollout10%')
+ end
+ fail_running_or_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'failed' })
+ expect(pipeline.reload.status).to eq 'failed'
+ end
+
+ context 'when user retries rollout 10%' do
+ it 'does not schedule rollout10% again' do
+ process_pipeline
+ succeed_pending
+ enqueue_scheduled('rollout10%')
+ fail_running_or_pending
+ retry_build('rollout10%')
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
+ expect(pipeline.reload.status).to eq 'running'
+ end
+ end
+ end
+
+ context 'when rollout 10% is played immediately' do
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
+
+ succeed_pending
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
+
+ play_manual_action('rollout10%')
+
+ expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
+ expect(pipeline.reload.status).to eq 'running'
+ end
+ end
+ end
+
+ context 'when only one scheduled job exists in a pipeline' do
+ before do
+ create_build('delayed', **delayed_options, stage_idx: 0)
+
+ allow(Ci::BuildScheduleWorker).to receive(:perform_at)
+ end
+
+ it 'properly processes the pipeline' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
+
+ expect(pipeline.reload.status).to eq 'scheduled'
+ end
+ end
+
+ context 'when there are two delayed jobs in a stage' do
+ before do
+ create_build('delayed1', **delayed_options, stage_idx: 0)
+ create_build('delayed2', **delayed_options, stage_idx: 0)
+ create_build('job', stage_idx: 1)
+
+ allow(Ci::BuildScheduleWorker).to receive(:perform_at)
+ end
+
+ it 'blocks the stage until all scheduled jobs have finished' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names_and_statuses).to eq({ 'delayed1': 'scheduled', 'delayed2': 'scheduled' })
+
+ travel_to 2.minutes.from_now do
+ enqueue_scheduled('delayed1')
+ end
+
+ expect(builds_names_and_statuses).to eq({ 'delayed1': 'pending', 'delayed2': 'scheduled' })
+ expect(pipeline.reload.status).to eq 'running'
+ end
+ end
+
+ context 'when a delayed job is allowed to fail' do
+ before do
+ create_build('delayed', **delayed_options, allow_failure: true, stage_idx: 0)
+ create_build('job', stage_idx: 1)
+
+ allow(Ci::BuildScheduleWorker).to receive(:perform_at)
+ end
+
+ it 'blocks the stage and continues after the delayed job fails' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
+
+ travel_to 2.minutes.from_now do
+ enqueue_scheduled('delayed')
+ end
+ fail_running_or_pending
+
+ expect(builds_names_and_statuses).to eq({ 'delayed': 'failed', 'job': 'pending' })
+ expect(pipeline.reload.status).to eq 'pending'
+ end
+ end
+ end
+
+ context 'when an exception is raised during a persistent ref creation' do
+ before do
+ successful_build('test', stage_idx: 0)
+
+ allow_next_instance_of(Ci::PersistentRef) do |instance|
+ allow(instance).to receive(:delete_refs) { raise ArgumentError }
+ end
+ end
+
+ it 'processes the pipeline' do
+ expect { process_pipeline }.not_to raise_error
+ end
+ end
+
+ context 'when there are manual actions in earlier stages' do
+ context 'when first stage has only optional manual actions' do
+ before do
+ create_build('build', stage_idx: 0, when: 'manual', allow_failure: true)
+ create_build('check', stage_idx: 1)
+ create_build('test', stage_idx: 2)
+
+ process_pipeline
+ end
+
+ it 'starts from the second stage' do
+ expect(all_builds_statuses).to eq %w[manual pending created]
+ end
+ end
+
+ context 'when second stage has only optional manual actions' do
+ before do
+ create_build('check', stage_idx: 0)
+ create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
+ create_build('test', stage_idx: 2)
+
+ process_pipeline
+ end
+
+ it 'skips second stage and continues on third stage', :sidekiq_inline do
+ expect(all_builds_statuses).to eq(%w[pending created created])
+
+ builds.first.success
+
+ expect(all_builds_statuses).to eq(%w[success manual pending])
+ end
+ end
+ end
+
+ context 'when there are only manual actions in stages' do
+ before do
+ create_build('image', stage_idx: 0, when: 'manual', allow_failure: true)
+ create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
+ create_build('deploy', stage_idx: 2, when: 'manual')
+ create_build('check', stage_idx: 3)
+
+ process_pipeline
+ end
+
+ it 'processes all jobs until blocking actions encountered' do
+ expect(all_builds_statuses).to eq(%w[manual manual manual created])
+ expect(all_builds_names).to eq(%w[image build deploy check])
+
+ expect(pipeline.reload).to be_blocked
+ end
+ end
+
+ context 'when there is only one manual action' do
+ before do
+ create_build('deploy', stage_idx: 0, when: 'manual', allow_failure: true)
+
+ process_pipeline
+ end
+
+ it 'skips the pipeline' do
+ expect(pipeline.reload).to be_skipped
+ end
+
+ context 'when the action was played' do
+ before do
+ play_manual_action('deploy')
+ end
+
+ it 'queues the action and pipeline', :sidekiq_inline do
+ expect(all_builds_statuses).to eq(%w[pending])
+
+ expect(pipeline.reload).to be_pending
+ end
+ end
+ end
+
+ context 'when blocking manual actions are defined', :sidekiq_inline do
+ before do
+ create_build('code:test', stage_idx: 0)
+ create_build('staging:deploy', stage_idx: 1, when: 'manual')
+ create_build('staging:test', stage_idx: 2, when: 'on_success')
+ create_build('production:deploy', stage_idx: 3, when: 'manual')
+ create_build('production:test', stage_idx: 4, when: 'always')
+ end
+
+ context 'when first stage succeeds' do
+ it 'blocks pipeline on stage with first manual action' do
+ process_pipeline
+
+ expect(builds_names).to eq %w[code:test]
+ expect(builds_statuses).to eq %w[pending]
+ expect(pipeline.reload.status).to eq 'pending'
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w[code:test staging:deploy]
+ expect(builds_statuses).to eq %w[success manual]
+ expect(pipeline.reload).to be_manual
+ end
+ end
+
+ context 'when first stage fails' do
+ it 'does not take blocking action into account' do
+ process_pipeline
+
+ expect(builds_names).to eq %w[code:test]
+ expect(builds_statuses).to eq %w[pending]
+ expect(pipeline.reload.status).to eq 'pending'
+
+ fail_running_or_pending
+
+ expect(builds_names).to eq %w[code:test production:test]
+ expect(builds_statuses).to eq %w[failed pending]
+
+ succeed_running_or_pending
+
+ expect(builds_statuses).to eq %w[failed success]
+ expect(pipeline.reload).to be_failed
+ end
+ end
+
+ context 'when pipeline is promoted sequentially up to the end' do
+ before do
+ # Users need ability to merge into a branch in order to trigger
+ # protected manual actions.
+ #
+ create(:protected_branch, :developers_can_merge,
+ name: 'master', project: project)
+ end
+
+ it 'properly processes entire pipeline' do
+ process_pipeline
+
+ expect(builds_names).to eq %w[code:test]
+ expect(builds_statuses).to eq %w[pending]
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w[code:test staging:deploy]
+ expect(builds_statuses).to eq %w[success manual]
+ expect(pipeline.reload).to be_manual
+
+ play_manual_action('staging:deploy')
+
+ expect(builds_statuses).to eq %w[success pending]
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w[code:test staging:deploy staging:test]
+ expect(builds_statuses).to eq %w[success success pending]
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w[code:test staging:deploy staging:test
+ production:deploy]
+ expect(builds_statuses).to eq %w[success success success manual]
+
+ expect(pipeline.reload).to be_manual
+ expect(pipeline.reload).to be_blocked
+ expect(pipeline.reload).not_to be_active
+ expect(pipeline.reload).not_to be_complete
+
+ play_manual_action('production:deploy')
+
+ expect(builds_statuses).to eq %w[success success success pending]
+ expect(pipeline.reload).to be_running
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w[code:test staging:deploy staging:test
+ production:deploy production:test]
+ expect(builds_statuses).to eq %w[success success success success pending]
+ expect(pipeline.reload).to be_running
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w[code:test staging:deploy staging:test
+ production:deploy production:test]
+ expect(builds_statuses).to eq %w[success success success success success]
+ expect(pipeline.reload).to be_success
+ end
+ end
+ end
+
+ context 'when second stage has only on_failure jobs', :sidekiq_inline do
+ before do
+ create_build('check', stage_idx: 0)
+ create_build('build', stage_idx: 1, when: 'on_failure')
+ create_build('test', stage_idx: 2)
+
+ process_pipeline
+ end
+
+ it 'skips second stage and continues on third stage' do
+ expect(all_builds_statuses).to eq(%w[pending created created])
+
+ builds.first.success
+
+ expect(all_builds_statuses).to eq(%w[success skipped pending])
+ end
+ end
+
+ context 'when failed build in the middle stage is retried', :sidekiq_inline do
+ context 'when failed build is the only unsuccessful build in the stage' do
+ before do
+ create_build('build:1', stage_idx: 0)
+ create_build('build:2', stage_idx: 0)
+ create_build('test:1', stage_idx: 1)
+ create_build('test:2', stage_idx: 1)
+ create_build('deploy:1', stage_idx: 2)
+ create_build('deploy:2', stage_idx: 2)
+ end
+
+ it 'does trigger builds in the next stage' do
+ expect(process_pipeline).to be_truthy
+ expect(builds_names).to eq ['build:1', 'build:2']
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
+
+ pipeline.builds.find_by(name: 'test:1').success!
+ pipeline.builds.find_by(name: 'test:2').drop!
+
+ expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
+
+ Ci::Build.retry(pipeline.builds.find_by(name: 'test:2'), user).reset.success!
+
+ expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2',
+ 'test:2', 'deploy:1', 'deploy:2']
+ end
+ end
+ end
+
+ context 'when builds with auto-retries are configured', :sidekiq_inline do
+ before do
+ create_build('build:1', stage_idx: 0, user: user, options: { script: 'aa', retry: 2 })
+ create_build('test:1', stage_idx: 1, user: user, when: :on_failure)
+ create_build('test:2', stage_idx: 1, user: user, options: { script: 'aa', retry: 1 })
+ end
+
+ it 'automatically retries builds in a valid order' do
+ expect(process_pipeline).to be_truthy
+
+ fail_running_or_pending
+
+ expect(builds_names).to eq %w[build:1 build:1]
+ expect(builds_statuses).to eq %w[failed pending]
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w[build:1 build:1 test:2]
+ expect(builds_statuses).to eq %w[failed success pending]
+
+ succeed_running_or_pending
+
+ expect(builds_names).to eq %w[build:1 build:1 test:2]
+ expect(builds_statuses).to eq %w[failed success success]
+
+ expect(pipeline.reload).to be_success
+ end
+ end
+
+ context 'when pipeline with needs is created', :sidekiq_inline do
+ let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0) }
+ let!(:mac_build) { create_build('mac:build', stage: 'build', stage_idx: 0) }
+ let!(:linux_rspec) { create_build('linux:rspec', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
+ let!(:linux_rubocop) { create_build('linux:rubocop', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
+ let!(:mac_rspec) { create_build('mac:rspec', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
+ let!(:mac_rubocop) { create_build('mac:rubocop', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
+ let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 2) }
+
+ let!(:linux_rspec_on_build) { create(:ci_build_need, build: linux_rspec, name: 'linux:build') }
+ let!(:linux_rubocop_on_build) { create(:ci_build_need, build: linux_rubocop, name: 'linux:build') }
+
+ let!(:mac_rspec_on_build) { create(:ci_build_need, build: mac_rspec, name: 'mac:build') }
+ let!(:mac_rubocop_on_build) { create(:ci_build_need, build: mac_rubocop, name: 'mac:build') }
+
+ it 'runs subsequent jobs out of order when linux:* finishes first' do
+ expect(process_pipeline).to be_truthy
+
+ expect(stages).to eq(%w(pending created created))
+ expect(builds.pending).to contain_exactly(linux_build, mac_build)
+
+ # we follow the single path of linux
+ linux_build.reset.success!
+
+ expect(stages).to eq(%w(running pending created))
+ expect(builds.success).to contain_exactly(linux_build)
+ expect(builds.pending).to contain_exactly(mac_build, linux_rspec, linux_rubocop)
+
+ linux_rspec.reset.success!
+
+ expect(stages).to eq(%w(running running created))
+ expect(builds.success).to contain_exactly(linux_build, linux_rspec)
+ expect(builds.pending).to contain_exactly(mac_build, linux_rubocop)
+
+ linux_rubocop.reset.success!
+
+ expect(stages).to eq(%w(running running created))
+ expect(builds.success).to contain_exactly(linux_build, linux_rspec, linux_rubocop)
+ expect(builds.pending).to contain_exactly(mac_build)
+
+ mac_build.reset.success!
+ mac_rspec.reset.success!
+ mac_rubocop.reset.success!
+
+ expect(stages).to eq(%w(success success pending))
+ expect(builds.success).to contain_exactly(
+ linux_build, linux_rspec, linux_rubocop, mac_build, mac_rspec, mac_rubocop)
+ expect(builds.pending).to contain_exactly(deploy)
+ end
+
+ context 'when one of the jobs is run on a failure' do
+ let!(:linux_notify) { create_build('linux:notify', stage: 'deploy', stage_idx: 2, when: 'on_failure', scheduling_type: :dag) }
+
+ let!(:linux_notify_on_build) { create(:ci_build_need, build: linux_notify, name: 'linux:build') }
+
+ context 'when another job in build phase fails first' do
+ it 'does skip linux:notify' do
+ expect(process_pipeline).to be_truthy
+
+ mac_build.reset.drop!
+ linux_build.reset.success!
+
+ expect(linux_notify.reset).to be_skipped
+ end
+ end
+
+ context 'when linux:build job fails first' do
+ it 'does run linux:notify' do
+ expect(process_pipeline).to be_truthy
+
+ linux_build.reset.drop!
+
+ expect(linux_notify.reset).to be_pending
+ end
+ end
+ end
+
+ context 'when there is a job scheduled with dag but no need (needs: [])' do
+ let!(:deploy_pages) { create_build('deploy_pages', stage: 'deploy', stage_idx: 2, scheduling_type: :dag) }
+
+ it 'runs deploy_pages without waiting for prior stages' do
+ expect(process_pipeline).to be_truthy
+
+ expect(stages).to eq(%w(pending created pending))
+ expect(builds.pending).to contain_exactly(linux_build, mac_build, deploy_pages)
+
+ linux_build.reset.success!
+ deploy_pages.reset.success!
+
+ expect(stages).to eq(%w(running pending running))
+ expect(builds.success).to contain_exactly(linux_build, deploy_pages)
+ expect(builds.pending).to contain_exactly(mac_build, linux_rspec, linux_rubocop)
+
+ linux_rspec.reset.success!
+ linux_rubocop.reset.success!
+ mac_build.reset.success!
+ mac_rspec.reset.success!
+ mac_rubocop.reset.success!
+
+ expect(stages).to eq(%w(success success running))
+ expect(builds.pending).to contain_exactly(deploy)
+ end
+ end
+ end
+
+ context 'when a needed job is skipped', :sidekiq_inline do
+ let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0) }
+ let!(:linux_rspec) { create_build('linux:rspec', stage: 'test', stage_idx: 1) }
+ let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 2, scheduling_type: :dag) }
+
+ before do
+ create(:ci_build_need, build: deploy, name: 'linux:build')
+ end
+
+ it 'skips the jobs depending on it' do
+ expect(process_pipeline).to be_truthy
+
+ expect(stages).to eq(%w(pending created created))
+ expect(all_builds.pending).to contain_exactly(linux_build)
+
+ linux_build.reset.drop!
+
+ expect(stages).to eq(%w(failed skipped skipped))
+ expect(all_builds.failed).to contain_exactly(linux_build)
+ expect(all_builds.skipped).to contain_exactly(linux_rspec, deploy)
+ end
+ end
+
+ context 'when a needed job is manual', :sidekiq_inline do
+ let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0, when: 'manual', allow_failure: true) }
+ let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 1, scheduling_type: :dag) }
+
+ before do
+ create(:ci_build_need, build: deploy, name: 'linux:build')
+ end
+
+ it 'causes the deploy DAG job to be skipped' do
+ expect(process_pipeline).to be_truthy
+
+ expect(stages).to eq(%w(skipped skipped))
+ expect(all_builds.manual).to contain_exactly(linux_build)
+ expect(all_builds.skipped).to contain_exactly(deploy)
+ end
+ end
+
+ context 'when a bridge job has parallel:matrix config', :sidekiq_inline do
+ let(:parent_config) do
+ <<-EOY
+ test:
+ stage: test
+ script: echo test
+
+ deploy:
+ stage: deploy
+ trigger:
+ include: .child.yml
+ parallel:
+ matrix:
+ - PROVIDER: ovh
+ STACK: [monitoring, app]
+ EOY
+ end
+
+ let(:child_config) do
+ <<-EOY
+ test:
+ stage: test
+ script: echo test
+ EOY
+ end
+
+ let(:pipeline) do
+ Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
+ end
+
+ before do
+ allow_next_instance_of(Repository) do |repository|
+ allow(repository)
+ .to receive(:blob_data_at)
+ .with(an_instance_of(String), '.gitlab-ci.yml')
+ .and_return(parent_config)
+
+ allow(repository)
+ .to receive(:blob_data_at)
+ .with(an_instance_of(String), '.child.yml')
+ .and_return(child_config)
+ end
+ end
+
+ it 'creates a pipeline with bridges, then passes the matrix variables to downstream jobs' do
+ expect(all_builds_names).to contain_exactly('test', 'deploy: [ovh, monitoring]', 'deploy: [ovh, app]')
+ expect(all_builds_statuses).to contain_exactly('pending', 'created', 'created')
+
+ succeed_pending
+
+ # bridge jobs directly transition to success
+ expect(all_builds_statuses).to contain_exactly('success', 'success', 'success')
+
+ bridge1 = all_builds.find_by(name: 'deploy: [ovh, monitoring]')
+ bridge2 = all_builds.find_by(name: 'deploy: [ovh, app]')
+
+ downstream_job1 = bridge1.downstream_pipeline.processables.first
+ downstream_job2 = bridge2.downstream_pipeline.processables.first
+
+ expect(downstream_job1.scoped_variables.to_hash).to include('PROVIDER' => 'ovh', 'STACK' => 'monitoring')
+ expect(downstream_job2.scoped_variables.to_hash).to include('PROVIDER' => 'ovh', 'STACK' => 'app')
+ end
+ end
+
+ context 'when a bridge job has invalid downstream project', :sidekiq_inline do
+ let(:config) do
+ <<-EOY
+ test:
+ stage: test
+ script: echo test
+
+ deploy:
+ stage: deploy
+ trigger:
+ project: invalid-project
+ EOY
+ end
+
+ let(:pipeline) do
+ Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
+ end
+
+ before do
+ stub_ci_pipeline_yaml_file(config)
+ end
+
+ it 'creates a pipeline, then fails the bridge job' do
+ expect(all_builds_names).to contain_exactly('test', 'deploy')
+ expect(all_builds_statuses).to contain_exactly('pending', 'created')
+
+ succeed_pending
+
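+ # the bridge cannot resolve 'invalid-project' downstream, so it drops to failed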
+ expect(all_builds_names).to contain_exactly('test', 'deploy')
+ expect(all_builds_statuses).to contain_exactly('success', 'failed')
+ end
+ end
+
+ private
+
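+ # processables cover both regular builds and bridge (trigger) jobs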
+ def all_builds
+ pipeline.processables.order(:stage_idx, :id)
+ end
+
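+ # only jobs that have started processing (excludes created and skipped ones)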
+ def builds
+ all_builds.where.not(status: [:created, :skipped])
+ end
+
+ def stages
+ pipeline.reset.stages.map(&:status)
+ end
+
+ def builds_names
+ builds.pluck(:name)
+ end
+
+ def builds_names_and_statuses
+ builds.each_with_object({}) do |b, h|
+ h[b.name.to_sym] = b.status
+ end
+ end
+
+ def all_builds_names
+ all_builds.pluck(:name)
+ end
+
+ def builds_statuses
+ builds.pluck(:status)
+ end
+
+ def all_builds_statuses
+ all_builds.pluck(:status)
+ end
+
+ def succeed_pending
+ builds.pending.each do |build|
+ build.reset.success
+ end
+ end
+
+ def succeed_running_or_pending
+ pipeline.builds.running_or_pending.each do |build|
+ build.reset.success
+ end
+ end
+
+ def fail_running_or_pending
+ pipeline.builds.running_or_pending.each do |build|
+ build.reset.drop
+ end
+ end
+
+ def cancel_running_or_pending
+ pipeline.builds.running_or_pending.each do |build|
+ build.reset.cancel
+ end
+ end
+
+ def play_manual_action(name)
+ builds.find_by(name: name).play(user)
+ end
+
+ def enqueue_scheduled(name)
+ builds.scheduled.find_by(name: name).enqueue_scheduled
+ end
+
+ def retry_build(name)
+ Ci::Build.retry(builds.find_by(name: name), user)
+ end
+
+ def manual_actions
+ pipeline.manual_actions.reload
+ end
+
+ def create_build(name, **opts)
+ create(:ci_build, :created, pipeline: pipeline, name: name, **with_stage_opts(opts))
+ end
+
+ def successful_build(name, **opts)
+ create(:ci_build, :success, pipeline: pipeline, name: name, **with_stage_opts(opts))
+ end
+
+ def with_stage_opts(opts)
+ { stage: "stage-#{opts[:stage_idx].to_i}" }.merge(opts)
+ end
+
+ def delayed_options
+ { when: 'delayed', options: { script: %w(echo), start_in: '1 minute' } }
+ end
+
+ def unschedule
+ pipeline.builds.scheduled.map(&:unschedule)
+ end
+ end
private
diff --git a/spec/services/ci/pipeline_processing/shared_processing_service.rb b/spec/services/ci/pipeline_processing/shared_processing_service.rb
deleted file mode 100644
index 8de9b308429..00000000000
--- a/spec/services/ci/pipeline_processing/shared_processing_service.rb
+++ /dev/null
@@ -1,1040 +0,0 @@
-# frozen_string_literal: true
-
-RSpec.shared_examples 'Pipeline Processing Service' do
- let(:project) { create(:project, :repository) }
- let(:user) { project.owner }
-
- let(:pipeline) do
- create(:ci_empty_pipeline, ref: 'master', project: project)
- end
-
- context 'when simple pipeline is defined' do
- before do
- create_build('linux', stage_idx: 0)
- create_build('mac', stage_idx: 0)
- create_build('rspec', stage_idx: 1)
- create_build('rubocop', stage_idx: 1)
- create_build('deploy', stage_idx: 2)
- end
-
- it 'processes a pipeline', :sidekiq_inline do
- expect(process_pipeline).to be_truthy
-
- succeed_pending
-
- expect(builds.success.count).to eq(2)
-
- succeed_pending
-
- expect(builds.success.count).to eq(4)
-
- succeed_pending
-
- expect(builds.success.count).to eq(5)
- end
-
- it 'does not process pipeline if existing stage is running' do
- expect(process_pipeline).to be_truthy
- expect(builds.pending.count).to eq(2)
-
- expect(process_pipeline).to be_falsey
- expect(builds.pending.count).to eq(2)
- end
- end
-
- context 'custom stage with first job allowed to fail' do
- before do
- create_build('clean_job', stage_idx: 0, allow_failure: true)
- create_build('test_job', stage_idx: 1, allow_failure: true)
- end
-
- it 'automatically triggers a next stage when build finishes', :sidekiq_inline do
- expect(process_pipeline).to be_truthy
- expect(builds_statuses).to eq ['pending']
-
- fail_running_or_pending
-
- expect(builds_statuses).to eq %w(failed pending)
-
- fail_running_or_pending
-
- expect(pipeline.reload).to be_success
- end
- end
-
- context 'when optional manual actions are defined', :sidekiq_inline do
- before do
- create_build('build', stage_idx: 0)
- create_build('test', stage_idx: 1)
- create_build('test_failure', stage_idx: 2, when: 'on_failure')
- create_build('deploy', stage_idx: 3)
- create_build('production', stage_idx: 3, when: 'manual', allow_failure: true)
- create_build('cleanup', stage_idx: 4, when: 'always')
- create_build('clear:cache', stage_idx: 4, when: 'manual', allow_failure: true)
- end
-
- context 'when builds are successful' do
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names).to eq ['build']
- expect(builds_statuses).to eq ['pending']
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w(build test)
- expect(builds_statuses).to eq %w(success pending)
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w(build test deploy production)
- expect(builds_statuses).to eq %w(success success pending manual)
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w(build test deploy production cleanup clear:cache)
- expect(builds_statuses).to eq %w(success success success manual pending manual)
-
- succeed_running_or_pending
-
- expect(builds_statuses).to eq %w(success success success manual success manual)
- expect(pipeline.reload.status).to eq 'success'
- end
- end
-
- context 'when test job fails' do
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names).to eq ['build']
- expect(builds_statuses).to eq ['pending']
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w(build test)
- expect(builds_statuses).to eq %w(success pending)
-
- fail_running_or_pending
-
- expect(builds_names).to eq %w(build test test_failure)
- expect(builds_statuses).to eq %w(success failed pending)
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w(build test test_failure cleanup)
- expect(builds_statuses).to eq %w(success failed success pending)
-
- succeed_running_or_pending
-
- expect(builds_statuses).to eq %w(success failed success success)
- expect(pipeline.reload.status).to eq 'failed'
- end
- end
-
- context 'when test and test_failure jobs fail' do
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names).to eq ['build']
- expect(builds_statuses).to eq ['pending']
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w(build test)
- expect(builds_statuses).to eq %w(success pending)
-
- fail_running_or_pending
-
- expect(builds_names).to eq %w(build test test_failure)
- expect(builds_statuses).to eq %w(success failed pending)
-
- fail_running_or_pending
-
- expect(builds_names).to eq %w(build test test_failure cleanup)
- expect(builds_statuses).to eq %w(success failed failed pending)
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w(build test test_failure cleanup)
- expect(builds_statuses).to eq %w(success failed failed success)
- expect(pipeline.reload.status).to eq('failed')
- end
- end
-
- context 'when deploy job fails' do
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names).to eq ['build']
- expect(builds_statuses).to eq ['pending']
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w(build test)
- expect(builds_statuses).to eq %w(success pending)
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w(build test deploy production)
- expect(builds_statuses).to eq %w(success success pending manual)
-
- fail_running_or_pending
-
- expect(builds_names).to eq %w(build test deploy production cleanup)
- expect(builds_statuses).to eq %w(success success failed manual pending)
-
- succeed_running_or_pending
-
- expect(builds_statuses).to eq %w(success success failed manual success)
- expect(pipeline.reload).to be_failed
- end
- end
-
- context 'when build is canceled in the second stage' do
- it 'does not schedule builds after build has been canceled' do
- expect(process_pipeline).to be_truthy
- expect(builds_names).to eq ['build']
- expect(builds_statuses).to eq ['pending']
-
- succeed_running_or_pending
-
- expect(builds.running_or_pending).not_to be_empty
- expect(builds_names).to eq %w(build test)
- expect(builds_statuses).to eq %w(success pending)
-
- cancel_running_or_pending
-
- expect(builds.running_or_pending).to be_empty
- expect(builds_names).to eq %w[build test]
- expect(builds_statuses).to eq %w[success canceled]
- expect(pipeline.reload).to be_canceled
- end
- end
-
- context 'when listing optional manual actions' do
- it 'returns actions only for skipped builds' do
- # currently all builds are created
- expect(process_pipeline).to be_truthy
- expect(manual_actions).to be_empty
-
- # succeed stage build
- succeed_running_or_pending
-
- expect(manual_actions).to be_empty
-
- # succeed stage test
- succeed_running_or_pending
-
- expect(manual_actions).to be_one # production
-
- # succeed stage deploy
- succeed_running_or_pending
-
- expect(manual_actions).to be_many # production and clear cache
- end
- end
- end
-
- context 'when delayed jobs are defined', :sidekiq_inline do
- context 'when the scenario is a timed incremental rollout' do
- before do
- create_build('build', stage_idx: 0)
- create_build('rollout10%', **delayed_options, stage_idx: 1)
- create_build('rollout100%', **delayed_options, stage_idx: 2)
- create_build('cleanup', stage_idx: 3)
-
- allow(Ci::BuildScheduleWorker).to receive(:perform_at)
- end
-
- context 'when builds are successful' do
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
-
- succeed_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
-
- travel_to 2.minutes.from_now do
- enqueue_scheduled('rollout10%')
- end
- succeed_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
-
- travel_to 2.minutes.from_now do
- enqueue_scheduled('rollout100%')
- end
- succeed_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'pending' })
-
- succeed_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'success' })
- expect(pipeline.reload.status).to eq 'success'
- end
- end
-
- context 'when build job fails' do
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
-
- fail_running_or_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'failed' })
- expect(pipeline.reload.status).to eq 'failed'
- end
- end
-
- context 'when rollout 10% is unscheduled' do
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
-
- succeed_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
-
- unschedule
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'manual' })
- expect(pipeline.reload.status).to eq 'manual'
- end
-
- context 'when user plays rollout 10%' do
- it 'schedules rollout100%' do
- process_pipeline
- succeed_pending
- unschedule
- play_manual_action('rollout10%')
- succeed_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
- expect(pipeline.reload.status).to eq 'scheduled'
- end
- end
- end
-
- context 'when rollout 10% fails' do
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
-
- succeed_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
-
- travel_to 2.minutes.from_now do
- enqueue_scheduled('rollout10%')
- end
- fail_running_or_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'failed' })
- expect(pipeline.reload.status).to eq 'failed'
- end
-
- context 'when user retries rollout 10%' do
- it 'does not schedule rollout10% again' do
- process_pipeline
- succeed_pending
- enqueue_scheduled('rollout10%')
- fail_running_or_pending
- retry_build('rollout10%')
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
- expect(pipeline.reload.status).to eq 'running'
- end
- end
- end
-
- context 'when rollout 10% is played immediately' do
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
-
- succeed_pending
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
-
- play_manual_action('rollout10%')
-
- expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
- expect(pipeline.reload.status).to eq 'running'
- end
- end
- end
-
- context 'when only one scheduled job exists in a pipeline' do
- before do
- create_build('delayed', **delayed_options, stage_idx: 0)
-
- allow(Ci::BuildScheduleWorker).to receive(:perform_at)
- end
-
- it 'properly processes the pipeline' do
- expect(process_pipeline).to be_truthy
- expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
-
- expect(pipeline.reload.status).to eq 'scheduled'
- end
- end
-
- context 'when there are two delayed jobs in a stage' do
- before do
- create_build('delayed1', **delayed_options, stage_idx: 0)
- create_build('delayed2', **delayed_options, stage_idx: 0)
- create_build('job', stage_idx: 1)
-
- allow(Ci::BuildScheduleWorker).to receive(:perform_at)
- end
-
- it 'blocks the stage until all scheduled jobs have finished' do
- expect(process_pipeline).to be_truthy
- expect(builds_names_and_statuses).to eq({ 'delayed1': 'scheduled', 'delayed2': 'scheduled' })
-
- travel_to 2.minutes.from_now do
- enqueue_scheduled('delayed1')
- end
-
- expect(builds_names_and_statuses).to eq({ 'delayed1': 'pending', 'delayed2': 'scheduled' })
- expect(pipeline.reload.status).to eq 'running'
- end
- end
-
- context 'when a delayed job is allowed to fail' do
- before do
- create_build('delayed', **delayed_options, allow_failure: true, stage_idx: 0)
- create_build('job', stage_idx: 1)
-
- allow(Ci::BuildScheduleWorker).to receive(:perform_at)
- end
-
- it 'blocks the stage and continues after the delayed job fails' do
- expect(process_pipeline).to be_truthy
- expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
-
- travel_to 2.minutes.from_now do
- enqueue_scheduled('delayed')
- end
- fail_running_or_pending
-
- expect(builds_names_and_statuses).to eq({ 'delayed': 'failed', 'job': 'pending' })
- expect(pipeline.reload.status).to eq 'pending'
- end
- end
- end
-
- context 'when an exception is raised during a persistent ref creation' do
- before do
- successful_build('test', stage_idx: 0)
-
- allow_next_instance_of(Ci::PersistentRef) do |instance|
- allow(instance).to receive(:delete_refs) { raise ArgumentError }
- end
- end
-
- it 'processes the pipeline' do
- expect { process_pipeline }.not_to raise_error
- end
- end
-
- context 'when there are manual actions in earlier stages' do
- context 'when first stage has only optional manual actions' do
- before do
- create_build('build', stage_idx: 0, when: 'manual', allow_failure: true)
- create_build('check', stage_idx: 1)
- create_build('test', stage_idx: 2)
-
- process_pipeline
- end
-
- it 'starts from the second stage' do
- expect(all_builds_statuses).to eq %w[manual pending created]
- end
- end
-
- context 'when second stage has only optional manual actions' do
- before do
- create_build('check', stage_idx: 0)
- create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
- create_build('test', stage_idx: 2)
-
- process_pipeline
- end
-
- it 'skips the second stage and continues to the third stage', :sidekiq_inline do
- expect(all_builds_statuses).to eq(%w[pending created created])
-
- builds.first.success
-
- expect(all_builds_statuses).to eq(%w[success manual pending])
- end
- end
- end
-
- context 'when there are only manual actions in stages' do
- before do
- create_build('image', stage_idx: 0, when: 'manual', allow_failure: true)
- create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
- create_build('deploy', stage_idx: 2, when: 'manual')
- create_build('check', stage_idx: 3)
-
- process_pipeline
- end
-
- it 'processes all jobs until blocking actions encountered' do
- expect(all_builds_statuses).to eq(%w[manual manual manual created])
- expect(all_builds_names).to eq(%w[image build deploy check])
-
- expect(pipeline.reload).to be_blocked
- end
- end
-
- context 'when there is only one manual action' do
- before do
- create_build('deploy', stage_idx: 0, when: 'manual', allow_failure: true)
-
- process_pipeline
- end
-
- it 'skips the pipeline' do
- expect(pipeline.reload).to be_skipped
- end
-
- context 'when the action was played' do
- before do
- play_manual_action('deploy')
- end
-
- it 'queues the action and pipeline', :sidekiq_inline do
- expect(all_builds_statuses).to eq(%w[pending])
-
- expect(pipeline.reload).to be_pending
- end
- end
- end
-
- context 'when blocking manual actions are defined', :sidekiq_inline do
- before do
- create_build('code:test', stage_idx: 0)
- create_build('staging:deploy', stage_idx: 1, when: 'manual')
- create_build('staging:test', stage_idx: 2, when: 'on_success')
- create_build('production:deploy', stage_idx: 3, when: 'manual')
- create_build('production:test', stage_idx: 4, when: 'always')
- end
-
- context 'when first stage succeeds' do
- it 'blocks pipeline on stage with first manual action' do
- process_pipeline
-
- expect(builds_names).to eq %w[code:test]
- expect(builds_statuses).to eq %w[pending]
- expect(pipeline.reload.status).to eq 'pending'
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w[code:test staging:deploy]
- expect(builds_statuses).to eq %w[success manual]
- expect(pipeline.reload).to be_manual
- end
- end
-
- context 'when first stage fails' do
- it 'does not take blocking action into account' do
- process_pipeline
-
- expect(builds_names).to eq %w[code:test]
- expect(builds_statuses).to eq %w[pending]
- expect(pipeline.reload.status).to eq 'pending'
-
- fail_running_or_pending
-
- expect(builds_names).to eq %w[code:test production:test]
- expect(builds_statuses).to eq %w[failed pending]
-
- succeed_running_or_pending
-
- expect(builds_statuses).to eq %w[failed success]
- expect(pipeline.reload).to be_failed
- end
- end
-
- context 'when pipeline is promoted sequentially up to the end' do
- before do
- # Users need the ability to merge into a branch in order to trigger
- # protected manual actions.
- #
- create(:protected_branch, :developers_can_merge,
- name: 'master', project: project)
- end
-
- it 'properly processes entire pipeline' do
- process_pipeline
-
- expect(builds_names).to eq %w[code:test]
- expect(builds_statuses).to eq %w[pending]
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w[code:test staging:deploy]
- expect(builds_statuses).to eq %w[success manual]
- expect(pipeline.reload).to be_manual
-
- play_manual_action('staging:deploy')
-
- expect(builds_statuses).to eq %w[success pending]
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w[code:test staging:deploy staging:test]
- expect(builds_statuses).to eq %w[success success pending]
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w[code:test staging:deploy staging:test
- production:deploy]
- expect(builds_statuses).to eq %w[success success success manual]
-
- expect(pipeline.reload).to be_manual
- expect(pipeline.reload).to be_blocked
- expect(pipeline.reload).not_to be_active
- expect(pipeline.reload).not_to be_complete
-
- play_manual_action('production:deploy')
-
- expect(builds_statuses).to eq %w[success success success pending]
- expect(pipeline.reload).to be_running
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w[code:test staging:deploy staging:test
- production:deploy production:test]
- expect(builds_statuses).to eq %w[success success success success pending]
- expect(pipeline.reload).to be_running
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w[code:test staging:deploy staging:test
- production:deploy production:test]
- expect(builds_statuses).to eq %w[success success success success success]
- expect(pipeline.reload).to be_success
- end
- end
- end
-
- context 'when second stage has only on_failure jobs', :sidekiq_inline do
- before do
- create_build('check', stage_idx: 0)
- create_build('build', stage_idx: 1, when: 'on_failure')
- create_build('test', stage_idx: 2)
-
- process_pipeline
- end
-
- it 'skips the second stage and continues to the third stage' do
- expect(all_builds_statuses).to eq(%w[pending created created])
-
- builds.first.success
-
- expect(all_builds_statuses).to eq(%w[success skipped pending])
- end
- end
-
- context 'when a failed build in the middle stage is retried', :sidekiq_inline do
- context 'when the failed build is the only unsuccessful build in the stage' do
- before do
- create_build('build:1', stage_idx: 0)
- create_build('build:2', stage_idx: 0)
- create_build('test:1', stage_idx: 1)
- create_build('test:2', stage_idx: 1)
- create_build('deploy:1', stage_idx: 2)
- create_build('deploy:2', stage_idx: 2)
- end
-
- it 'triggers builds in the next stage' do
- expect(process_pipeline).to be_truthy
- expect(builds_names).to eq ['build:1', 'build:2']
-
- succeed_running_or_pending
-
- expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
-
- pipeline.builds.find_by(name: 'test:1').success!
- pipeline.builds.find_by(name: 'test:2').drop!
-
- expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
-
- Ci::Build.retry(pipeline.builds.find_by(name: 'test:2'), user).reset.success!
-
- expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2',
- 'test:2', 'deploy:1', 'deploy:2']
- end
- end
- end
-
- context 'when builds with auto-retries are configured', :sidekiq_inline do
- before do
- create_build('build:1', stage_idx: 0, user: user, options: { script: 'aa', retry: 2 })
- create_build('test:1', stage_idx: 1, user: user, when: :on_failure)
- create_build('test:2', stage_idx: 1, user: user, options: { script: 'aa', retry: 1 })
- end
-
- it 'automatically retries builds in a valid order' do
- expect(process_pipeline).to be_truthy
-
- fail_running_or_pending
-
- expect(builds_names).to eq %w[build:1 build:1]
- expect(builds_statuses).to eq %w[failed pending]
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w[build:1 build:1 test:2]
- expect(builds_statuses).to eq %w[failed success pending]
-
- succeed_running_or_pending
-
- expect(builds_names).to eq %w[build:1 build:1 test:2]
- expect(builds_statuses).to eq %w[failed success success]
-
- expect(pipeline.reload).to be_success
- end
- end
-
- context 'when pipeline with needs is created', :sidekiq_inline do
- let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0) }
- let!(:mac_build) { create_build('mac:build', stage: 'build', stage_idx: 0) }
- let!(:linux_rspec) { create_build('linux:rspec', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
- let!(:linux_rubocop) { create_build('linux:rubocop', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
- let!(:mac_rspec) { create_build('mac:rspec', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
- let!(:mac_rubocop) { create_build('mac:rubocop', stage: 'test', stage_idx: 1, scheduling_type: :dag) }
- let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 2) }
-
- let!(:linux_rspec_on_build) { create(:ci_build_need, build: linux_rspec, name: 'linux:build') }
- let!(:linux_rubocop_on_build) { create(:ci_build_need, build: linux_rubocop, name: 'linux:build') }
-
- let!(:mac_rspec_on_build) { create(:ci_build_need, build: mac_rspec, name: 'mac:build') }
- let!(:mac_rubocop_on_build) { create(:ci_build_need, build: mac_rubocop, name: 'mac:build') }
-
- it 'runs the linux:* jobs out of order when they finish first' do
- expect(process_pipeline).to be_truthy
-
- expect(stages).to eq(%w(pending created created))
- expect(builds.pending).to contain_exactly(linux_build, mac_build)
-
- # we follow the single path of linux
- linux_build.reset.success!
-
- expect(stages).to eq(%w(running pending created))
- expect(builds.success).to contain_exactly(linux_build)
- expect(builds.pending).to contain_exactly(mac_build, linux_rspec, linux_rubocop)
-
- linux_rspec.reset.success!
-
- expect(stages).to eq(%w(running running created))
- expect(builds.success).to contain_exactly(linux_build, linux_rspec)
- expect(builds.pending).to contain_exactly(mac_build, linux_rubocop)
-
- linux_rubocop.reset.success!
-
- expect(stages).to eq(%w(running running created))
- expect(builds.success).to contain_exactly(linux_build, linux_rspec, linux_rubocop)
- expect(builds.pending).to contain_exactly(mac_build)
-
- mac_build.reset.success!
- mac_rspec.reset.success!
- mac_rubocop.reset.success!
-
- expect(stages).to eq(%w(success success pending))
- expect(builds.success).to contain_exactly(
- linux_build, linux_rspec, linux_rubocop, mac_build, mac_rspec, mac_rubocop)
- expect(builds.pending).to contain_exactly(deploy)
- end
-
- context 'when one of the jobs is configured to run on failure' do
- let!(:linux_notify) { create_build('linux:notify', stage: 'deploy', stage_idx: 2, when: 'on_failure', scheduling_type: :dag) }
-
- let!(:linux_notify_on_build) { create(:ci_build_need, build: linux_notify, name: 'linux:build') }
-
- context 'when another job in build phase fails first' do
- it 'skips linux:notify' do
- expect(process_pipeline).to be_truthy
-
- mac_build.reset.drop!
- linux_build.reset.success!
-
- expect(linux_notify.reset).to be_skipped
- end
- end
-
- context 'when linux:build job fails first' do
- it 'runs linux:notify' do
- expect(process_pipeline).to be_truthy
-
- linux_build.reset.drop!
-
- expect(linux_notify.reset).to be_pending
- end
- end
- end
-
- context 'when there is a job scheduled with dag but no need (needs: [])' do
- let!(:deploy_pages) { create_build('deploy_pages', stage: 'deploy', stage_idx: 2, scheduling_type: :dag) }
-
- it 'runs deploy_pages without waiting for prior stages' do
- expect(process_pipeline).to be_truthy
-
- expect(stages).to eq(%w(pending created pending))
- expect(builds.pending).to contain_exactly(linux_build, mac_build, deploy_pages)
-
- linux_build.reset.success!
- deploy_pages.reset.success!
-
- expect(stages).to eq(%w(running pending running))
- expect(builds.success).to contain_exactly(linux_build, deploy_pages)
- expect(builds.pending).to contain_exactly(mac_build, linux_rspec, linux_rubocop)
-
- linux_rspec.reset.success!
- linux_rubocop.reset.success!
- mac_build.reset.success!
- mac_rspec.reset.success!
- mac_rubocop.reset.success!
-
- expect(stages).to eq(%w(success success running))
- expect(builds.pending).to contain_exactly(deploy)
- end
- end
- end
-
- context 'when a needed job is skipped', :sidekiq_inline do
- let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0) }
- let!(:linux_rspec) { create_build('linux:rspec', stage: 'test', stage_idx: 1) }
- let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 2, scheduling_type: :dag) }
-
- before do
- create(:ci_build_need, build: deploy, name: 'linux:build')
- end
-
- it 'skips the jobs that depend on it' do
- expect(process_pipeline).to be_truthy
-
- expect(stages).to eq(%w(pending created created))
- expect(all_builds.pending).to contain_exactly(linux_build)
-
- linux_build.reset.drop!
-
- expect(stages).to eq(%w(failed skipped skipped))
- expect(all_builds.failed).to contain_exactly(linux_build)
- expect(all_builds.skipped).to contain_exactly(linux_rspec, deploy)
- end
- end
-
- context 'when a needed job is manual', :sidekiq_inline do
- let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0, when: 'manual', allow_failure: true) }
- let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 1, scheduling_type: :dag) }
-
- before do
- create(:ci_build_need, build: deploy, name: 'linux:build')
- end
-
- it 'skips the DAG deploy job' do
- expect(process_pipeline).to be_truthy
-
- expect(stages).to eq(%w(skipped skipped))
- expect(all_builds.manual).to contain_exactly(linux_build)
- expect(all_builds.skipped).to contain_exactly(deploy)
- end
- end
-
- context 'when a bridge job has parallel:matrix config', :sidekiq_inline do
- let(:parent_config) do
- <<-EOY
- test:
- stage: test
- script: echo test
-
- deploy:
- stage: deploy
- trigger:
- include: .child.yml
- parallel:
- matrix:
- - PROVIDER: ovh
- STACK: [monitoring, app]
- EOY
- end
-
- let(:child_config) do
- <<-EOY
- test:
- stage: test
- script: echo test
- EOY
- end
-
- let(:pipeline) do
- Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
- end
-
- before do
- allow_next_instance_of(Repository) do |repository|
- allow(repository)
- .to receive(:blob_data_at)
- .with(an_instance_of(String), '.gitlab-ci.yml')
- .and_return(parent_config)
-
- allow(repository)
- .to receive(:blob_data_at)
- .with(an_instance_of(String), '.child.yml')
- .and_return(child_config)
- end
- end
-
- it 'creates pipeline with bridges, then passes the matrix variables to downstream jobs' do
- expect(all_builds_names).to contain_exactly('test', 'deploy: [ovh, monitoring]', 'deploy: [ovh, app]')
- expect(all_builds_statuses).to contain_exactly('pending', 'created', 'created')
-
- succeed_pending
-
- # bridge jobs directly transition to success
- expect(all_builds_statuses).to contain_exactly('success', 'success', 'success')
-
- bridge1 = all_builds.find_by(name: 'deploy: [ovh, monitoring]')
- bridge2 = all_builds.find_by(name: 'deploy: [ovh, app]')
-
- downstream_job1 = bridge1.downstream_pipeline.processables.first
- downstream_job2 = bridge2.downstream_pipeline.processables.first
-
- expect(downstream_job1.scoped_variables.to_hash).to include('PROVIDER' => 'ovh', 'STACK' => 'monitoring')
- expect(downstream_job2.scoped_variables.to_hash).to include('PROVIDER' => 'ovh', 'STACK' => 'app')
- end
- end
-
- context 'when a bridge job has invalid downstream project', :sidekiq_inline do
- let(:config) do
- <<-EOY
- test:
- stage: test
- script: echo test
-
- deploy:
- stage: deploy
- trigger:
- project: invalid-project
- EOY
- end
-
- let(:pipeline) do
- Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
- end
-
- before do
- stub_ci_pipeline_yaml_file(config)
- end
-
- it 'creates a pipeline, then fails the bridge job' do
- expect(all_builds_names).to contain_exactly('test', 'deploy')
- expect(all_builds_statuses).to contain_exactly('pending', 'created')
-
- succeed_pending
-
- expect(all_builds_names).to contain_exactly('test', 'deploy')
- expect(all_builds_statuses).to contain_exactly('success', 'failed')
- end
- end
-
- private
-
- def all_builds
- pipeline.processables.order(:stage_idx, :id)
- end
-
- def builds
- all_builds.where.not(status: [:created, :skipped])
- end
-
- def stages
- pipeline.reset.stages.map(&:status)
- end
-
- def builds_names
- builds.pluck(:name)
- end
-
- def builds_names_and_statuses
- builds.each_with_object({}) do |b, h|
- h[b.name.to_sym] = b.status
- end
- end
-
- def all_builds_names
- all_builds.pluck(:name)
- end
-
- def builds_statuses
- builds.pluck(:status)
- end
-
- def all_builds_statuses
- all_builds.pluck(:status)
- end
-
- def succeed_pending
- builds.pending.each do |build|
- build.reset.success
- end
- end
-
- def succeed_running_or_pending
- pipeline.builds.running_or_pending.each do |build|
- build.reset.success
- end
- end
-
- def fail_running_or_pending
- pipeline.builds.running_or_pending.each do |build|
- build.reset.drop
- end
- end
-
- def cancel_running_or_pending
- pipeline.builds.running_or_pending.each do |build|
- build.reset.cancel
- end
- end
-
- def play_manual_action(name)
- builds.find_by(name: name).play(user)
- end
-
- def enqueue_scheduled(name)
- builds.scheduled.find_by(name: name).enqueue_scheduled
- end
-
- def retry_build(name)
- Ci::Build.retry(builds.find_by(name: name), user)
- end
-
- def manual_actions
- pipeline.manual_actions.reload
- end
-
- def create_build(name, **opts)
- create(:ci_build, :created, pipeline: pipeline, name: name, **with_stage_opts(opts))
- end
-
- def successful_build(name, **opts)
- create(:ci_build, :success, pipeline: pipeline, name: name, **with_stage_opts(opts))
- end
-
- def with_stage_opts(opts)
- { stage: "stage-#{opts[:stage_idx].to_i}" }.merge(opts)
- end
-
- def delayed_options
- { when: 'delayed', options: { script: %w(echo), start_in: '1 minute' } }
- end
-
- def unschedule
- pipeline.builds.scheduled.map(&:unschedule)
- end
-end
diff --git a/spec/services/ci/pipeline_processing/shared_processing_service_tests_with_yaml.rb b/spec/services/ci/pipeline_processing/shared_processing_service_tests_with_yaml.rb
deleted file mode 100644
index b4ad2512593..00000000000
--- a/spec/services/ci/pipeline_processing/shared_processing_service_tests_with_yaml.rb
+++ /dev/null
@@ -1,61 +0,0 @@
-# frozen_string_literal: true
-
-RSpec.shared_context 'Pipeline Processing Service Tests With Yaml' do
- let_it_be(:project) { create(:project, :repository) }
- let_it_be(:user) { project.owner }
-
- where(:test_file_path) do
- Dir.glob(Rails.root.join('spec/services/ci/pipeline_processing/test_cases/*.yml'))
- end
-
- with_them do
- let(:test_file) { YAML.load_file(test_file_path) }
- let(:pipeline) { Ci::CreatePipelineService.new(project, user, ref: 'master').execute(:pipeline).payload }
-
- before do
- stub_ci_pipeline_yaml_file(YAML.dump(test_file['config']))
- end
-
- it 'follows transitions' do
- expect(pipeline).to be_persisted
- Sidekiq::Worker.drain_all # ensure that all async jobs are executed
- check_expectation(test_file.dig('init', 'expect'), "init")
-
- test_file['transitions'].each_with_index do |transition, idx|
- event_on_jobs(transition['event'], transition['jobs'])
- Sidekiq::Worker.drain_all # ensure that all async jobs are executed
- check_expectation(transition['expect'], "transition:#{idx}")
- end
- end
-
- private
-
- def check_expectation(expectation, message)
- expect(current_state.deep_stringify_keys).to eq(expectation), message
- end
-
- def current_state
- # reload pipeline and all relations
- pipeline.reload
-
- {
- pipeline: pipeline.status,
- stages: pipeline.stages.pluck(:name, :status).to_h,
- jobs: pipeline.latest_statuses.pluck(:name, :status).to_h
- }
- end
-
- def event_on_jobs(event, job_names)
- statuses = pipeline.latest_statuses.by_name(job_names).to_a
- expect(statuses.count).to eq(job_names.count) # ensure that we have the same counts
-
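- # 'play' requires a user; the other events map to bang state transitions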
- statuses.each do |status|
- if event == 'play'
- status.play(user)
- else
- status.public_send("#{event}!")
- end
- end
- end
- end
-end
diff --git a/spec/services/ci/play_build_service_spec.rb b/spec/services/ci/play_build_service_spec.rb
index babd601e0cf..34f77260334 100644
--- a/spec/services/ci/play_build_service_spec.rb
+++ b/spec/services/ci/play_build_service_spec.rb
@@ -79,12 +79,22 @@ RSpec.describe Ci::PlayBuildService, '#execute' do
{ key: 'second', secret_value: 'second' }]
end
+ subject { service.execute(build, job_variables) }
+
it 'assigns the variables to the build' do
- service.execute(build, job_variables)
+ subject
expect(build.reload.job_variables.map(&:key)).to contain_exactly('first', 'second')
end
+ context 'when variables are invalid' do
+ let(:job_variables) { [{}] }
+
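+ # an empty variables hash has no key, so saving it raises ActiveRecord::RecordInvalid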
+ it 'raises an error' do
+ expect { subject }.to raise_error(ActiveRecord::RecordInvalid)
+ end
+ end
+
context 'when user defined variables are restricted' do
before do
project.update!(restrict_user_defined_variables: true)
@@ -96,7 +106,7 @@ RSpec.describe Ci::PlayBuildService, '#execute' do
end
it 'assigns the variables to the build' do
- service.execute(build, job_variables)
+ subject
expect(build.reload.job_variables.map(&:key)).to contain_exactly('first', 'second')
end
@@ -104,8 +114,7 @@ RSpec.describe Ci::PlayBuildService, '#execute' do
context 'when user is developer' do
it 'raises an error' do
- expect { service.execute(build, job_variables) }
- .to raise_error Gitlab::Access::AccessDeniedError
+ expect { subject }.to raise_error Gitlab::Access::AccessDeniedError
end
end
end
diff --git a/spec/services/ci/process_sync_events_service_spec.rb b/spec/services/ci/process_sync_events_service_spec.rb
new file mode 100644
index 00000000000..00b670ff54f
--- /dev/null
+++ b/spec/services/ci/process_sync_events_service_spec.rb
@@ -0,0 +1,129 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Ci::ProcessSyncEventsService do
+ let!(:group) { create(:group) }
+ let!(:project1) { create(:project, group: group) }
+ let!(:project2) { create(:project, group: group) }
+ let!(:parent_group_1) { create(:group) }
+ let!(:parent_group_2) { create(:group) }
+
+ subject(:service) { described_class.new(sync_event_class, hierarchy_class) }
+
+ describe '#execute' do
+ subject(:execute) { service.execute }
+
+ context 'for Projects::SyncEvent' do
+ let(:sync_event_class) { Projects::SyncEvent }
+ let(:hierarchy_class) { ::Ci::ProjectMirror }
+
+ before do
+ Projects::SyncEvent.delete_all
+
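+ # each project's group reassignment below records a Projects::SyncEvent (two in total)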
+ project1.update!(group: parent_group_1)
+ project2.update!(group: parent_group_2)
+ end
+
+ it 'consumes events' do
+ expect { execute }.to change(Projects::SyncEvent, :count).from(2).to(0)
+
+ expect(project1.ci_project_mirror).to have_attributes(
+ namespace_id: parent_group_1.id
+ )
+ expect(project2.ci_project_mirror).to have_attributes(
+ namespace_id: parent_group_2.id
+ )
+ end
+
+ it 'enqueues Projects::ProcessSyncEventsWorker if any events are left' do
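+ # BATCH_SIZE of 1 consumes only one of the two events, leaving one behind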
+ stub_const("#{described_class}::BATCH_SIZE", 1)
+
+ expect(Projects::ProcessSyncEventsWorker).to receive(:perform_async)
+
+ execute
+ end
+
+ it 'does not enqueue Projects::ProcessSyncEventsWorker if none are left' do
+ stub_const("#{described_class}::BATCH_SIZE", 2)
+
+ expect(Projects::ProcessSyncEventsWorker).not_to receive(:perform_async)
+
+ execute
+ end
+
+ context 'when there is no event' do
+ before do
+ Projects::SyncEvent.delete_all
+ end
+
+ it 'does nothing' do
+ expect { execute }.not_to change(Projects::SyncEvent, :count)
+ end
+ end
+
+ context 'when the FF ci_namespace_project_mirrors is disabled' do
+ before do
+ stub_feature_flags(ci_namespace_project_mirrors: false)
+ end
+
+ it 'does nothing' do
+ expect { execute }.not_to change(Projects::SyncEvent, :count)
+ end
+ end
+ end
+
+ context 'for Namespaces::SyncEvent' do
+ let(:sync_event_class) { Namespaces::SyncEvent }
+ let(:hierarchy_class) { ::Ci::NamespaceMirror }
+
+ before do
+ Namespaces::SyncEvent.delete_all
+
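+ # build the parent_group_1 -> parent_group_2 -> group hierarchy; each update records a sync event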
+ group.update!(parent: parent_group_2)
+ parent_group_2.update!(parent: parent_group_1)
+ end
+
+ shared_examples 'event consuming' do
+ it 'consumes events' do
+ expect { execute }.to change(Namespaces::SyncEvent, :count).from(2).to(0)
+
+ expect(group.ci_namespace_mirror).to have_attributes(
+ traversal_ids: [parent_group_1.id, parent_group_2.id, group.id]
+ )
+ expect(parent_group_2.ci_namespace_mirror).to have_attributes(
+ traversal_ids: [parent_group_1.id, parent_group_2.id]
+ )
+ end
+ end
+
+ context 'when the FFs sync_traversal_ids, use_traversal_ids and use_traversal_ids_for_ancestors are disabled' do
+ before do
+ stub_feature_flags(sync_traversal_ids: false,
+ use_traversal_ids: false,
+ use_traversal_ids_for_ancestors: false)
+ end
+
+ it_behaves_like 'event consuming'
+ end
+
+ it_behaves_like 'event consuming'
+
+ it 'enqueues Namespaces::ProcessSyncEventsWorker if any events are left' do
+ stub_const("#{described_class}::BATCH_SIZE", 1)
+
+ expect(Namespaces::ProcessSyncEventsWorker).to receive(:perform_async)
+
+ execute
+ end
+
+ it 'does not enqueue Namespaces::ProcessSyncEventsWorker if none are left' do
+ stub_const("#{described_class}::BATCH_SIZE", 2)
+
+ expect(Namespaces::ProcessSyncEventsWorker).not_to receive(:perform_async)
+
+ execute
+ end
+ end
+ end
+end
diff --git a/spec/services/ci/register_job_service_spec.rb b/spec/services/ci/register_job_service_spec.rb
index 650353eb751..866015aa523 100644
--- a/spec/services/ci/register_job_service_spec.rb
+++ b/spec/services/ci/register_job_service_spec.rb
@@ -87,36 +87,10 @@ module Ci
end
context 'for specific runner' do
- context 'with tables decoupling disabled' do
- before do
- stub_feature_flags(
- ci_pending_builds_project_runners_decoupling: false,
- ci_queueing_builds_enabled_checks: false)
- end
-
- around do |example|
- allow_cross_joins_across_databases(url: 'https://gitlab.com/gitlab-org/gitlab/-/issues/332952') do
- example.run
- end
- end
-
- it 'does not pick a build' do
- expect(execute(specific_runner)).to be_nil
- end
- end
-
- context 'with tables decoupling enabled' do
- before do
- stub_feature_flags(
- ci_pending_builds_project_runners_decoupling: true,
- ci_queueing_builds_enabled_checks: true)
- end
-
- it 'does not pick a build' do
- expect(execute(specific_runner)).to be_nil
- expect(pending_job.reload).to be_failed
- expect(pending_job.queuing_entry).to be_nil
- end
+ it 'does not pick a build' do
+ expect(execute(specific_runner)).to be_nil
+ expect(pending_job.reload).to be_failed
+ expect(pending_job.queuing_entry).to be_nil
end
end
end
@@ -272,34 +246,10 @@ module Ci
context 'and uses project runner' do
let(:build) { execute(specific_runner) }
- context 'with tables decoupling disabled' do
- before do
- stub_feature_flags(
- ci_pending_builds_project_runners_decoupling: false,
- ci_queueing_builds_enabled_checks: false)
- end
-
- around do |example|
- allow_cross_joins_across_databases(url: 'https://gitlab.com/gitlab-org/gitlab/-/issues/332952') do
- example.run
- end
- end
-
- it { expect(build).to be_nil }
- end
-
- context 'with tables decoupling enabled' do
- before do
- stub_feature_flags(
- ci_pending_builds_project_runners_decoupling: true,
- ci_queueing_builds_enabled_checks: true)
- end
-
- it 'does not pick a build' do
- expect(build).to be_nil
- expect(pending_job.reload).to be_failed
- expect(pending_job.queuing_entry).to be_nil
- end
+ it 'does not pick a build' do
+ expect(build).to be_nil
+ expect(pending_job.reload).to be_failed
+ expect(pending_job.queuing_entry).to be_nil
end
end
end
@@ -790,17 +740,17 @@ module Ci
stub_feature_flags(ci_pending_builds_queue_source: true)
end
- context 'with ci_queueing_denormalize_shared_runners_information enabled' do
+ context 'with ci_queuing_use_denormalized_data_strategy enabled' do
before do
- stub_feature_flags(ci_queueing_denormalize_shared_runners_information: true)
+ stub_feature_flags(ci_queuing_use_denormalized_data_strategy: true)
end
include_examples 'handles runner assignment'
end
- context 'with ci_queueing_denormalize_shared_runners_information disabled' do
+ context 'with ci_queuing_use_denormalized_data_strategy disabled' do
before do
- stub_feature_flags(ci_queueing_denormalize_shared_runners_information: false)
+ stub_feature_flags(ci_queuing_use_denormalized_data_strategy: false)
end
around do |example|
@@ -812,37 +762,9 @@ module Ci
include_examples 'handles runner assignment'
end
- context 'with ci_queueing_denormalize_tags_information enabled' do
- before do
- stub_feature_flags(ci_queueing_denormalize_tags_information: true)
- end
-
- include_examples 'handles runner assignment'
- end
-
- context 'with ci_queueing_denormalize_tags_information disabled' do
+ context 'with ci_queuing_use_denormalized_data_strategy enabled' do
before do
- stub_feature_flags(ci_queueing_denormalize_tags_information: false)
- end
-
- around do |example|
- allow_cross_joins_across_databases(url: 'https://gitlab.com/gitlab-org/gitlab/-/issues/332952') do
- example.run
- end
- end
-
- include_examples 'handles runner assignment'
- end
-
- context 'with ci_queueing_denormalize_namespace_traversal_ids disabled' do
- before do
- stub_feature_flags(ci_queueing_denormalize_namespace_traversal_ids: false)
- end
-
- around do |example|
- allow_cross_joins_across_databases(url: 'https://gitlab.com/gitlab-org/gitlab/-/issues/332952') do
- example.run
- end
+ stub_feature_flags(ci_queuing_use_denormalized_data_strategy: true)
end
include_examples 'handles runner assignment'
diff --git a/spec/services/ci/retry_build_service_spec.rb b/spec/services/ci/retry_build_service_spec.rb
index 16635c64434..5d56084faa8 100644
--- a/spec/services/ci/retry_build_service_spec.rb
+++ b/spec/services/ci/retry_build_service_spec.rb
@@ -73,6 +73,8 @@ RSpec.describe Ci::RetryBuildService do
scheduled_at: 10.seconds.since)
end
+ let_it_be(:internal_job_variable) { create(:ci_job_variable, job: build) }
+
before_all do
# Make sure that build has both `stage_id` and `stage` because FactoryBot
# can reset one of the fields when assigning another. We plan to deprecate
@@ -86,7 +88,7 @@ RSpec.describe Ci::RetryBuildService do
file_type: file_type, job: build, expire_at: build.artifacts_expire_at)
end
- create(:ci_job_variable, job: build)
+ create(:ci_job_variable, :dotenv_source, job: build)
create(:ci_build_need, build: build)
create(:terraform_state_version, build: build)
end
@@ -125,6 +127,11 @@ RSpec.describe Ci::RetryBuildService do
expect(new_build.needs_attributes).to match(build.needs_attributes)
expect(new_build.needs).not_to match(build.needs)
end
+
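+ # only the internal variable should be cloned; the :dotenv_source one must not carry over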
+ it 'clones only internal job variables' do
+ expect(new_build.job_variables.count).to eq(1)
+ expect(new_build.job_variables).to contain_exactly(having_attributes(key: internal_job_variable.key, value: internal_job_variable.value))
+ end
end
describe 'reject accessors' do
@@ -147,7 +154,7 @@ RSpec.describe Ci::RetryBuildService do
Ci::Build.attribute_names.map(&:to_sym) +
Ci::Build.attribute_aliases.keys.map(&:to_sym) +
Ci::Build.reflect_on_all_associations.map(&:name) +
- [:tag_list, :needs_attributes] -
+ [:tag_list, :needs_attributes, :job_variables_attributes] -
# ee-specific accessors should be tested in ee/spec/services/ci/retry_build_service_spec.rb instead
described_class.extra_accessors -
[:dast_site_profiles_build, :dast_scanner_profiles_build] # join tables
@@ -310,7 +317,7 @@ RSpec.describe Ci::RetryBuildService do
expect(build).to be_processed
end
- context 'when build with deployment is retried' do
+ shared_examples_for 'when build with deployment is retried' do
let!(:build) do
create(:ci_build, :with_deployment, :deploy_to_production,
pipeline: pipeline, stage_id: stage.id, project: project)
@@ -329,7 +336,7 @@ RSpec.describe Ci::RetryBuildService do
end
end
- context 'when build with dynamic environment is retried' do
+ shared_examples_for 'when build with dynamic environment is retried' do
let_it_be(:other_developer) { create(:user).tap { |u| project.add_developer(other_developer) } }
let(:environment_name) { 'review/$CI_COMMIT_REF_SLUG-$GITLAB_USER_ID' }
@@ -356,6 +363,18 @@ RSpec.describe Ci::RetryBuildService do
end
end
+ it_behaves_like 'when build with deployment is retried'
+ it_behaves_like 'when build with dynamic environment is retried'
+
+ context 'when create_deployment_in_separate_transaction feature flag is disabled' do
+ before do
+ stub_feature_flags(create_deployment_in_separate_transaction: false)
+ end
+
+ it_behaves_like 'when build with deployment is retried'
+ it_behaves_like 'when build with dynamic environment is retried'
+ end
+
context 'when build has needs' do
before do
create(:ci_build_need, build: build, name: 'build1')
diff --git a/spec/services/ci/stuck_builds/drop_pending_service_spec.rb b/spec/services/ci/stuck_builds/drop_pending_service_spec.rb
index aa0526edf57..ebc57af77a0 100644
--- a/spec/services/ci/stuck_builds/drop_pending_service_spec.rb
+++ b/spec/services/ci/stuck_builds/drop_pending_service_spec.rb
@@ -3,8 +3,12 @@
require 'spec_helper'
RSpec.describe Ci::StuckBuilds::DropPendingService do
- let!(:runner) { create :ci_runner }
- let!(:job) { create :ci_build, runner: runner }
+ let_it_be(:runner) { create(:ci_runner) }
+ let_it_be(:pipeline) { create(:ci_empty_pipeline) }
+ let_it_be_with_reload(:job) do
+ create(:ci_build, pipeline: pipeline, runner: runner)
+ end
+
let(:created_at) { }
let(:updated_at) { }
@@ -14,6 +18,8 @@ RSpec.describe Ci::StuckBuilds::DropPendingService do
job_attributes = { status: status }
job_attributes[:created_at] = created_at if created_at
job_attributes[:updated_at] = updated_at if updated_at
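+ # compact! presumably drops nil entries now that the job record is reused across examples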
+ job_attributes.compact!
+
job.update!(job_attributes)
end
@@ -41,12 +47,6 @@ RSpec.describe Ci::StuckBuilds::DropPendingService do
it_behaves_like 'job is dropped with failure reason', 'stuck_or_timeout_failure'
end
-
- context 'when created_at is outside lookback window' do
- let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
- it_behaves_like 'job is unchanged'
- end
end
context 'when job was updated less than 1 day ago' do
@@ -63,12 +63,6 @@ RSpec.describe Ci::StuckBuilds::DropPendingService do
it_behaves_like 'job is unchanged'
end
-
- context 'when created_at is outside lookback window' do
- let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
- it_behaves_like 'job is unchanged'
- end
end
context 'when job was updated more than 1 hour ago' do
@@ -85,12 +79,6 @@ RSpec.describe Ci::StuckBuilds::DropPendingService do
it_behaves_like 'job is unchanged'
end
-
- context 'when created_at is outside lookback window' do
- let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
- it_behaves_like 'job is unchanged'
- end
end
end
@@ -115,12 +103,6 @@ RSpec.describe Ci::StuckBuilds::DropPendingService do
it_behaves_like 'job is dropped with failure reason', 'stuck_or_timeout_failure'
end
-
- context 'when created_at is outside lookback window' do
- let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
- it_behaves_like 'job is unchanged'
- end
end
context 'when job was updated in less than 1 hour ago' do
@@ -137,12 +119,6 @@ RSpec.describe Ci::StuckBuilds::DropPendingService do
it_behaves_like 'job is unchanged'
end
-
- context 'when created_at is outside lookback window' do
- let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
- it_behaves_like 'job is unchanged'
- end
end
end
end
@@ -179,12 +155,6 @@ RSpec.describe Ci::StuckBuilds::DropPendingService do
it_behaves_like 'job is unchanged'
end
-
- context 'when created_at is outside lookback window' do
- let(:created_at) { described_class::BUILD_LOOKBACK - 1.day }
-
- it_behaves_like 'job is unchanged'
- end
end
end
diff --git a/spec/services/ci/update_pending_build_service_spec.rb b/spec/services/ci/update_pending_build_service_spec.rb
index d36564938c8..2bb0aded24a 100644
--- a/spec/services/ci/update_pending_build_service_spec.rb
+++ b/spec/services/ci/update_pending_build_service_spec.rb
@@ -43,9 +43,9 @@ RSpec.describe Ci::UpdatePendingBuildService do
expect(pending_build_2.instance_runners_enabled).to be_truthy
end
- context 'when ci_pending_builds_maintain_shared_runners_data is disabled' do
+ context 'when ci_pending_builds_maintain_denormalized_data is disabled' do
before do
- stub_feature_flags(ci_pending_builds_maintain_shared_runners_data: false)
+ stub_feature_flags(ci_pending_builds_maintain_denormalized_data: false)
end
it 'does not update all pending builds', :aggregate_failures do
@@ -67,9 +67,9 @@ RSpec.describe Ci::UpdatePendingBuildService do
expect(pending_build_2.instance_runners_enabled).to be_truthy
end
- context 'when ci_pending_builds_maintain_shared_runners_data is disabled' do
+ context 'when ci_pending_builds_maintain_denormalized_data is disabled' do
before do
- stub_feature_flags(ci_pending_builds_maintain_shared_runners_data: false)
+ stub_feature_flags(ci_pending_builds_maintain_denormalized_data: false)
end
it 'does not update all pending builds', :aggregate_failures do