path: root/app/services/ci
Diffstat (limited to 'app/services/ci')
-rw-r--r--  app/services/ci/abort_pipelines_service.rb | 32
-rw-r--r--  app/services/ci/abort_project_pipelines_service.rb | 25
-rw-r--r--  app/services/ci/after_requeue_job_service.rb | 28
-rw-r--r--  app/services/ci/cancel_user_pipelines_service.rb | 18
-rw-r--r--  app/services/ci/create_job_artifacts_service.rb | 172
-rw-r--r--  app/services/ci/create_pipeline_service.rb | 3
-rw-r--r--  app/services/ci/create_web_ide_terminal_service.rb | 3
-rw-r--r--  app/services/ci/destroy_expired_job_artifacts_service.rb | 56
-rw-r--r--  app/services/ci/disable_user_pipeline_schedules_service.rb | 11
-rw-r--r--  app/services/ci/drop_pipeline_service.rb | 37
-rw-r--r--  app/services/ci/generate_coverage_reports_service.rb | 8
-rw-r--r--  app/services/ci/job_artifacts/create_service.rb | 174
-rw-r--r--  app/services/ci/job_artifacts/destroy_all_expired_service.rb | 58
-rw-r--r--  app/services/ci/job_artifacts/destroy_batch_service.rb | 74
-rw-r--r--  app/services/ci/job_artifacts_destroy_batch_service.rb | 72
-rw-r--r--  app/services/ci/pipeline_artifacts/destroy_all_expired_service.rb (renamed from app/services/ci/pipeline_artifacts/destroy_expired_artifacts_service.rb) | 2
-rw-r--r--  app/services/ci/pipeline_processing/atomic_processing_service/status_collection.rb | 8
-rw-r--r--  app/services/ci/pipeline_trigger_service.rb | 23
-rw-r--r--  app/services/ci/play_bridge_service.rb | 4
-rw-r--r--  app/services/ci/play_build_service.rb | 8
-rw-r--r--  app/services/ci/process_build_service.rb | 25
-rw-r--r--  app/services/ci/process_pipeline_service.rb | 2
-rw-r--r--  app/services/ci/register_job_service.rb | 30
-rw-r--r--  app/services/ci/retry_build_service.rb | 18
-rw-r--r--  app/services/ci/retry_pipeline_service.rb | 2
-rw-r--r--  app/services/ci/stop_environments_service.rb | 2
-rw-r--r--  app/services/ci/test_failure_history_service.rb | 48
27 files changed, 519 insertions(+), 424 deletions(-)
diff --git a/app/services/ci/abort_pipelines_service.rb b/app/services/ci/abort_pipelines_service.rb
new file mode 100644
index 00000000000..43734c4dd39
--- /dev/null
+++ b/app/services/ci/abort_pipelines_service.rb
@@ -0,0 +1,32 @@
+# frozen_string_literal: true
+
+module Ci
+ class AbortPipelinesService
+ # NOTE: This call fails pipelines in bulk without running callbacks.
+ # Only for pipeline abandonment scenarios (example: project deletion)
+ def execute(pipelines, failure_reason)
+ pipelines.cancelable.each_batch(of: 100) do |pipeline_batch|
+ now = Time.current
+
+ basic_attributes = { status: :failed }
+ all_attributes = basic_attributes.merge(failure_reason: failure_reason, finished_at: now)
+
+ bulk_fail_for(Ci::Stage, pipeline_batch, basic_attributes)
+ bulk_fail_for(CommitStatus, pipeline_batch, all_attributes)
+
+ pipeline_batch.update_all(all_attributes)
+ end
+
+ ServiceResponse.success(message: 'Pipelines stopped')
+ end
+
+ private
+
+ def bulk_fail_for(klass, pipelines, attributes)
+ klass.in_pipelines(pipelines)
+ .cancelable
+ .in_batches(of: 150) # rubocop:disable Cop/InBatches
+ .update_all(attributes)
+ end
+ end
+end
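For context, a hypothetical caller of the new service (the :project_deleted failure reason and the project-deletion call site are assumptions, not shown in this diff):

    # Bulk-fail everything cancelable in a project, e.g. while deleting it.
    response = Ci::AbortPipelinesService.new.execute(project.all_pipelines, :project_deleted)
    response.message # => "Pipelines stopped"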
diff --git a/app/services/ci/abort_project_pipelines_service.rb b/app/services/ci/abort_project_pipelines_service.rb
deleted file mode 100644
index 0b2fa9ed3c0..00000000000
--- a/app/services/ci/abort_project_pipelines_service.rb
+++ /dev/null
@@ -1,25 +0,0 @@
-# frozen_string_literal: true
-
-module Ci
- class AbortProjectPipelinesService
- # Danger: Cancels in bulk without callbacks
- # Only for pipeline abandonment scenarios (current example: project delete)
- def execute(project)
- return unless Feature.enabled?(:abort_deleted_project_pipelines, default_enabled: :yaml)
-
- pipelines = project.all_pipelines.cancelable
- bulk_abort!(pipelines, status: :canceled)
-
- ServiceResponse.success(message: 'Pipelines canceled')
- end
-
- private
-
- def bulk_abort!(pipelines, status:)
- pipelines.each_batch do |pipeline_batch|
- CommitStatus.in_pipelines(pipeline_batch).in_batches.update_all(status: status) # rubocop: disable Cop/InBatches
- pipeline_batch.update_all(status: status)
- end
- end
- end
-end
diff --git a/app/services/ci/after_requeue_job_service.rb b/app/services/ci/after_requeue_job_service.rb
new file mode 100644
index 00000000000..3858ee9d550
--- /dev/null
+++ b/app/services/ci/after_requeue_job_service.rb
@@ -0,0 +1,28 @@
+# frozen_string_literal: true
+
+module Ci
+ class AfterRequeueJobService < ::BaseService
+ def execute(processable)
+ process_subsequent_jobs(processable)
+ reset_ancestor_bridges(processable)
+ end
+
+ private
+
+ def process_subsequent_jobs(processable)
+ processable.pipeline.processables.skipped.after_stage(processable.stage_idx).find_each do |processable|
+ process(processable)
+ end
+ end
+
+ def reset_ancestor_bridges(processable)
+ processable.pipeline.reset_ancestor_bridges!
+ end
+
+ def process(processable)
+ Gitlab::OptimisticLocking.retry_lock(processable, name: 'ci_requeue_job') do |processable|
+ processable.process(current_user)
+ end
+ end
+ end
+end
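This service consolidates the requeue bookkeeping that RetryBuildService previously did inline (see the removal of mark_subsequent_stages_as_processable further down). A minimal usage sketch matching the call sites added elsewhere in this diff:

    # After re-enqueuing a job, wake up skipped subsequent jobs and
    # reset ancestor bridges:
    Ci::AfterRequeueJobService.new(project, current_user).execute(build)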
diff --git a/app/services/ci/cancel_user_pipelines_service.rb b/app/services/ci/cancel_user_pipelines_service.rb
deleted file mode 100644
index 3d3a8032e8e..00000000000
--- a/app/services/ci/cancel_user_pipelines_service.rb
+++ /dev/null
@@ -1,18 +0,0 @@
-# frozen_string_literal: true
-
-module Ci
- class CancelUserPipelinesService
- # rubocop: disable CodeReuse/ActiveRecord
- # This is a bug with CodeReuse/ActiveRecord cop
- # https://gitlab.com/gitlab-org/gitlab/issues/32332
- def execute(user)
- # TODO: fix N+1 queries https://gitlab.com/gitlab-org/gitlab/-/issues/300685
- user.pipelines.cancelable.find_each(&:cancel_running)
-
- ServiceResponse.success(message: 'Pipeline canceled')
- rescue ActiveRecord::StaleObjectError
- ServiceResponse.error(message: 'Error canceling pipeline')
- end
- # rubocop: enable CodeReuse/ActiveRecord
- end
-end
diff --git a/app/services/ci/create_job_artifacts_service.rb b/app/services/ci/create_job_artifacts_service.rb
deleted file mode 100644
index f1fdc8e2490..00000000000
--- a/app/services/ci/create_job_artifacts_service.rb
+++ /dev/null
@@ -1,172 +0,0 @@
-# frozen_string_literal: true
-
-module Ci
- class CreateJobArtifactsService < ::BaseService
- include Gitlab::Utils::UsageData
-
- ArtifactsExistError = Class.new(StandardError)
-
- LSIF_ARTIFACT_TYPE = 'lsif'
- METRICS_REPORT_UPLOAD_EVENT_NAME = 'i_testing_metrics_report_artifact_uploaders'
-
- OBJECT_STORAGE_ERRORS = [
- Errno::EIO,
- Google::Apis::ServerError,
- Signet::RemoteServerError
- ].freeze
-
- def initialize(job)
- @job = job
- @project = job.project
- end
-
- def authorize(artifact_type:, filesize: nil)
- result = validate_requirements(artifact_type: artifact_type, filesize: filesize)
- return result unless result[:status] == :success
-
- headers = JobArtifactUploader.workhorse_authorize(has_length: false, maximum_size: max_size(artifact_type))
-
- if lsif?(artifact_type)
- headers[:ProcessLsif] = true
- track_usage_event('i_source_code_code_intelligence', project.id)
- end
-
- success(headers: headers)
- end
-
- def execute(artifacts_file, params, metadata_file: nil)
- result = validate_requirements(artifact_type: params[:artifact_type], filesize: artifacts_file.size)
- return result unless result[:status] == :success
-
- return success if sha256_matches_existing_artifact?(params[:artifact_type], artifacts_file)
-
- artifact, artifact_metadata = build_artifact(artifacts_file, params, metadata_file)
- result = parse_artifact(artifact)
-
- track_artifact_uploader(artifact)
-
- return result unless result[:status] == :success
-
- persist_artifact(artifact, artifact_metadata, params)
- end
-
- private
-
- attr_reader :job, :project
-
- def validate_requirements(artifact_type:, filesize:)
- return too_large_error if too_large?(artifact_type, filesize)
-
- success
- end
-
- def too_large?(type, size)
- size > max_size(type) if size
- end
-
- def lsif?(type)
- type == LSIF_ARTIFACT_TYPE
- end
-
- def max_size(type)
- Ci::JobArtifact.max_artifact_size(type: type, project: project)
- end
-
- def forbidden_type_error(type)
- error("#{type} artifacts are forbidden", :forbidden)
- end
-
- def too_large_error
- error('file size has reached maximum size limit', :payload_too_large)
- end
-
- def build_artifact(artifacts_file, params, metadata_file)
- expire_in = params['expire_in'] ||
- Gitlab::CurrentSettings.current_application_settings.default_artifacts_expire_in
-
- artifact = Ci::JobArtifact.new(
- job_id: job.id,
- project: project,
- file: artifacts_file,
- file_type: params[:artifact_type],
- file_format: params[:artifact_format],
- file_sha256: artifacts_file.sha256,
- expire_in: expire_in)
-
- artifact_metadata = if metadata_file
- Ci::JobArtifact.new(
- job_id: job.id,
- project: project,
- file: metadata_file,
- file_type: :metadata,
- file_format: :gzip,
- file_sha256: metadata_file.sha256,
- expire_in: expire_in)
- end
-
- [artifact, artifact_metadata]
- end
-
- def parse_artifact(artifact)
- unless Feature.enabled?(:ci_synchronous_artifact_parsing, project, default_enabled: true)
- return success
- end
-
- case artifact.file_type
- when 'dotenv' then parse_dotenv_artifact(artifact)
- when 'cluster_applications' then parse_cluster_applications_artifact(artifact)
- else success
- end
- end
-
- def persist_artifact(artifact, artifact_metadata, params)
- Ci::JobArtifact.transaction do
- artifact.save!
- artifact_metadata&.save!
-
- # NOTE: The `artifacts_expire_at` column is already deprecated and to be removed in the near future.
- job.update_column(:artifacts_expire_at, artifact.expire_at)
- end
-
- success
- rescue ActiveRecord::RecordNotUnique => error
- track_exception(error, params)
- error('another artifact of the same type already exists', :bad_request)
- rescue *OBJECT_STORAGE_ERRORS => error
- track_exception(error, params)
- error(error.message, :service_unavailable)
- rescue => error
- track_exception(error, params)
- error(error.message, :bad_request)
- end
-
- def sha256_matches_existing_artifact?(artifact_type, artifacts_file)
- existing_artifact = job.job_artifacts.find_by_file_type(artifact_type)
- return false unless existing_artifact
-
- existing_artifact.file_sha256 == artifacts_file.sha256
- end
-
- def track_exception(error, params)
- Gitlab::ErrorTracking.track_exception(error,
- job_id: job.id,
- project_id: job.project_id,
- uploading_type: params[:artifact_type]
- )
- end
-
- def track_artifact_uploader(artifact)
- return unless artifact.file_type == 'metrics'
-
- track_usage_event(METRICS_REPORT_UPLOAD_EVENT_NAME, @job.user_id)
- end
-
- def parse_dotenv_artifact(artifact)
- Ci::ParseDotenvArtifactService.new(project, current_user).execute(artifact)
- end
-
- def parse_cluster_applications_artifact(artifact)
- Clusters::ParseClusterApplicationsArtifactService.new(job, job.user).execute(artifact)
- end
- end
-end
diff --git a/app/services/ci/create_pipeline_service.rb b/app/services/ci/create_pipeline_service.rb
index 0fd47e625fd..ca936307acc 100644
--- a/app/services/ci/create_pipeline_service.rb
+++ b/app/services/ci/create_pipeline_service.rb
@@ -96,7 +96,8 @@ module Ci
# rubocop: enable Metrics/ParameterLists
def execute!(*args, &block)
- source, params = args[0], Hash(args[1])
+ source = args[0]
+ params = Hash(args[1])
execute(source, **params, &block).tap do |pipeline|
unless pipeline.persisted?
diff --git a/app/services/ci/create_web_ide_terminal_service.rb b/app/services/ci/create_web_ide_terminal_service.rb
index 785d82094b9..3b89a599180 100644
--- a/app/services/ci/create_web_ide_terminal_service.rb
+++ b/app/services/ci/create_web_ide_terminal_service.rb
@@ -58,7 +58,8 @@ module Ci
builds: [terminal_build_seed]
}
- Gitlab::Ci::Pipeline::Seed::Stage.new(pipeline, attributes, [])
+ seed_context = Gitlab::Ci::Pipeline::Seed::Context.new(pipeline)
+ Gitlab::Ci::Pipeline::Seed::Stage.new(seed_context, attributes, [])
end
def terminal_build_seed
diff --git a/app/services/ci/destroy_expired_job_artifacts_service.rb b/app/services/ci/destroy_expired_job_artifacts_service.rb
deleted file mode 100644
index d91cfb3cc82..00000000000
--- a/app/services/ci/destroy_expired_job_artifacts_service.rb
+++ /dev/null
@@ -1,56 +0,0 @@
-# frozen_string_literal: true
-
-module Ci
- class DestroyExpiredJobArtifactsService
- include ::Gitlab::ExclusiveLeaseHelpers
- include ::Gitlab::LoopHelpers
-
- BATCH_SIZE = 100
- LOOP_TIMEOUT = 5.minutes
- LOOP_LIMIT = 1000
- EXCLUSIVE_LOCK_KEY = 'expired_job_artifacts:destroy:lock'
- LOCK_TIMEOUT = 6.minutes
-
- def initialize
- @removed_artifacts_count = 0
- end
-
- ##
- # Destroy expired job artifacts on GitLab instance
- #
- # This destroy process cannot run for more than 6 minutes. This is for
- # preventing multiple `ExpireBuildArtifactsWorker` CRON jobs run concurrently,
- # which is scheduled every 7 minutes.
- def execute
- in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
- destroy_job_artifacts_with_slow_iteration(Time.current)
- end
-
- @removed_artifacts_count
- end
-
- private
-
- def destroy_job_artifacts_with_slow_iteration(start_at)
- Ci::JobArtifact.expired_before(start_at).each_batch(of: BATCH_SIZE, column: :expire_at, order: :desc) do |relation, index|
- # For performance reasons, join with ci_pipelines after the batch is queried.
- # See: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47496
- artifacts = relation.unlocked
-
- service_response = destroy_batch_async(artifacts)
- @removed_artifacts_count += service_response[:destroyed_artifacts_count]
-
- break if loop_timeout?(start_at)
- break if index >= LOOP_LIMIT
- end
- end
-
- def destroy_batch_async(artifacts)
- Ci::JobArtifactsDestroyBatchService.new(artifacts).execute
- end
-
- def loop_timeout?(start_at)
- Time.current > start_at + LOOP_TIMEOUT
- end
- end
-end
diff --git a/app/services/ci/disable_user_pipeline_schedules_service.rb b/app/services/ci/disable_user_pipeline_schedules_service.rb
new file mode 100644
index 00000000000..6499fbba0ec
--- /dev/null
+++ b/app/services/ci/disable_user_pipeline_schedules_service.rb
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+module Ci
+ class DisableUserPipelineSchedulesService
+ def execute(user)
+ Ci::PipelineSchedule.active.owned_by(user).each_batch do |relation|
+ relation.update_all(active: false)
+ end
+ end
+ end
+end
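A minimal sketch of the intended use; the blocked-user scenario is an assumption, since this diff only adds the service:

    # Deactivate, in batches, every active schedule owned by the user:
    Ci::DisableUserPipelineSchedulesService.new.execute(user)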
diff --git a/app/services/ci/drop_pipeline_service.rb b/app/services/ci/drop_pipeline_service.rb
new file mode 100644
index 00000000000..f510943251b
--- /dev/null
+++ b/app/services/ci/drop_pipeline_service.rb
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+module Ci
+ class DropPipelineService
+ # Executes the service asynchronously for each cancelable pipeline
+ def execute_async_for_all(pipelines, failure_reason, context_user)
+ pipelines.cancelable.select(:id).find_in_batches do |pipelines_batch|
+ Ci::DropPipelineWorker.bulk_perform_async_with_contexts(
+ pipelines_batch,
+ arguments_proc: -> (pipeline) { [pipeline.id, failure_reason] },
+ context_proc: -> (_) { { user: context_user } }
+ )
+ end
+ end
+
+ def execute(pipeline, failure_reason, retries: 3)
+ Gitlab::OptimisticLocking.retry_lock(pipeline.cancelable_statuses, retries, name: 'ci_pipeline_drop_running') do |cancelables|
+ cancelables.find_in_batches do |batch|
+ preload_associations_for_drop(batch)
+
+ batch.each do |job|
+ job.drop(failure_reason)
+ end
+ end
+ end
+ end
+
+ private
+
+ def preload_associations_for_drop(builds_batch)
+ ActiveRecord::Associations::Preloader.new.preload( # rubocop: disable CodeReuse/ActiveRecord
+ builds_batch,
+ [:project, :pipeline, :metadata, :deployment, :taggings]
+ )
+ end
+ end
+end
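A hypothetical caller of both entry points (the :user_blocked failure reason is an assumption, not part of this diff):

    # Enqueue one Ci::DropPipelineWorker per cancelable pipeline, in batches:
    Ci::DropPipelineService.new.execute_async_for_all(user.pipelines, :user_blocked, current_user)

    # Inside the worker, each pipeline is then dropped synchronously:
    Ci::DropPipelineService.new.execute(pipeline, :user_blocked)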
diff --git a/app/services/ci/generate_coverage_reports_service.rb b/app/services/ci/generate_coverage_reports_service.rb
index b3aa7b3091b..4e6fbc5462a 100644
--- a/app/services/ci/generate_coverage_reports_service.rb
+++ b/app/services/ci/generate_coverage_reports_service.rb
@@ -15,7 +15,13 @@ module Ci
data: head_pipeline.pipeline_artifacts.find_by_file_type(:code_coverage).present.for_files(merge_request.new_paths)
}
rescue => e
- Gitlab::ErrorTracking.track_exception(e, project_id: project.id)
+ Gitlab::ErrorTracking.track_exception(
+ e,
+ project_id: project.id,
+ base_pipeline_id: base_pipeline&.id,
+ head_pipeline_id: head_pipeline&.id
+ )
+
{
status: :error,
key: key(base_pipeline, head_pipeline),
diff --git a/app/services/ci/job_artifacts/create_service.rb b/app/services/ci/job_artifacts/create_service.rb
new file mode 100644
index 00000000000..65752e56c64
--- /dev/null
+++ b/app/services/ci/job_artifacts/create_service.rb
@@ -0,0 +1,174 @@
+# frozen_string_literal: true
+
+module Ci
+ module JobArtifacts
+ class CreateService < ::BaseService
+ include Gitlab::Utils::UsageData
+
+ ArtifactsExistError = Class.new(StandardError)
+
+ LSIF_ARTIFACT_TYPE = 'lsif'
+ METRICS_REPORT_UPLOAD_EVENT_NAME = 'i_testing_metrics_report_artifact_uploaders'
+
+ OBJECT_STORAGE_ERRORS = [
+ Errno::EIO,
+ Google::Apis::ServerError,
+ Signet::RemoteServerError
+ ].freeze
+
+ def initialize(job)
+ @job = job
+ @project = job.project
+ end
+
+ def authorize(artifact_type:, filesize: nil)
+ result = validate_requirements(artifact_type: artifact_type, filesize: filesize)
+ return result unless result[:status] == :success
+
+ headers = JobArtifactUploader.workhorse_authorize(has_length: false, maximum_size: max_size(artifact_type))
+
+ if lsif?(artifact_type)
+ headers[:ProcessLsif] = true
+ track_usage_event('i_source_code_code_intelligence', project.id)
+ end
+
+ success(headers: headers)
+ end
+
+ def execute(artifacts_file, params, metadata_file: nil)
+ result = validate_requirements(artifact_type: params[:artifact_type], filesize: artifacts_file.size)
+ return result unless result[:status] == :success
+
+ return success if sha256_matches_existing_artifact?(params[:artifact_type], artifacts_file)
+
+ artifact, artifact_metadata = build_artifact(artifacts_file, params, metadata_file)
+ result = parse_artifact(artifact)
+
+ track_artifact_uploader(artifact)
+
+ return result unless result[:status] == :success
+
+ persist_artifact(artifact, artifact_metadata, params)
+ end
+
+ private
+
+ attr_reader :job, :project
+
+ def validate_requirements(artifact_type:, filesize:)
+ return too_large_error if too_large?(artifact_type, filesize)
+
+ success
+ end
+
+ def too_large?(type, size)
+ size > max_size(type) if size
+ end
+
+ def lsif?(type)
+ type == LSIF_ARTIFACT_TYPE
+ end
+
+ def max_size(type)
+ Ci::JobArtifact.max_artifact_size(type: type, project: project)
+ end
+
+ def forbidden_type_error(type)
+ error("#{type} artifacts are forbidden", :forbidden)
+ end
+
+ def too_large_error
+ error('file size has reached maximum size limit', :payload_too_large)
+ end
+
+ def build_artifact(artifacts_file, params, metadata_file)
+ expire_in = params['expire_in'] ||
+ Gitlab::CurrentSettings.current_application_settings.default_artifacts_expire_in
+
+ artifact = Ci::JobArtifact.new(
+ job_id: job.id,
+ project: project,
+ file: artifacts_file,
+ file_type: params[:artifact_type],
+ file_format: params[:artifact_format],
+ file_sha256: artifacts_file.sha256,
+ expire_in: expire_in)
+
+ artifact_metadata = if metadata_file
+ Ci::JobArtifact.new(
+ job_id: job.id,
+ project: project,
+ file: metadata_file,
+ file_type: :metadata,
+ file_format: :gzip,
+ file_sha256: metadata_file.sha256,
+ expire_in: expire_in)
+ end
+
+ [artifact, artifact_metadata]
+ end
+
+ def parse_artifact(artifact)
+ unless Feature.enabled?(:ci_synchronous_artifact_parsing, project, default_enabled: true)
+ return success
+ end
+
+ case artifact.file_type
+ when 'dotenv' then parse_dotenv_artifact(artifact)
+ when 'cluster_applications' then parse_cluster_applications_artifact(artifact)
+ else success
+ end
+ end
+
+ def persist_artifact(artifact, artifact_metadata, params)
+ Ci::JobArtifact.transaction do
+ artifact.save!
+ artifact_metadata&.save!
+
+ # NOTE: The `artifacts_expire_at` column is already deprecated and to be removed in the near future.
+ job.update_column(:artifacts_expire_at, artifact.expire_at)
+ end
+
+ success
+ rescue ActiveRecord::RecordNotUnique => error
+ track_exception(error, params)
+ error('another artifact of the same type already exists', :bad_request)
+ rescue *OBJECT_STORAGE_ERRORS => error
+ track_exception(error, params)
+ error(error.message, :service_unavailable)
+ rescue => error
+ track_exception(error, params)
+ error(error.message, :bad_request)
+ end
+
+ def sha256_matches_existing_artifact?(artifact_type, artifacts_file)
+ existing_artifact = job.job_artifacts.find_by_file_type(artifact_type)
+ return false unless existing_artifact
+
+ existing_artifact.file_sha256 == artifacts_file.sha256
+ end
+
+ def track_exception(error, params)
+ Gitlab::ErrorTracking.track_exception(error,
+ job_id: job.id,
+ project_id: job.project_id,
+ uploading_type: params[:artifact_type]
+ )
+ end
+
+ def track_artifact_uploader(artifact)
+ return unless artifact.file_type == 'metrics'
+
+ track_usage_event(METRICS_REPORT_UPLOAD_EVENT_NAME, @job.user_id)
+ end
+
+ def parse_dotenv_artifact(artifact)
+ Ci::ParseDotenvArtifactService.new(project, current_user).execute(artifact)
+ end
+
+ def parse_cluster_applications_artifact(artifact)
+ Clusters::ParseClusterApplicationsArtifactService.new(job, job.user).execute(artifact)
+ end
+ end
+ end
+end
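A sketch of the two-phase upload flow this service implements; artifacts_file, metadata_file, and the parameter values are illustrative:

    service = Ci::JobArtifacts::CreateService.new(job)

    # Phase 1: Workhorse asks for upload authorization.
    result = service.authorize(artifact_type: 'archive', filesize: artifacts_file.size)
    headers = result[:headers] if result[:status] == :success

    # Phase 2: the uploaded file (plus optional metadata) is persisted.
    service.execute(artifacts_file,
                    { artifact_type: 'archive', artifact_format: 'zip' },
                    metadata_file: metadata_file)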
diff --git a/app/services/ci/job_artifacts/destroy_all_expired_service.rb b/app/services/ci/job_artifacts/destroy_all_expired_service.rb
new file mode 100644
index 00000000000..3e9cc95d135
--- /dev/null
+++ b/app/services/ci/job_artifacts/destroy_all_expired_service.rb
@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+module Ci
+ module JobArtifacts
+ class DestroyAllExpiredService
+ include ::Gitlab::ExclusiveLeaseHelpers
+ include ::Gitlab::LoopHelpers
+
+ BATCH_SIZE = 100
+ LOOP_TIMEOUT = 5.minutes
+ LOOP_LIMIT = 1000
+ EXCLUSIVE_LOCK_KEY = 'expired_job_artifacts:destroy:lock'
+ LOCK_TIMEOUT = 6.minutes
+
+ def initialize
+ @removed_artifacts_count = 0
+ end
+
+ ##
+ # Destroy expired job artifacts on GitLab instance
+ #
+ # This destroy process cannot run for more than 6 minutes. This prevents
+ # multiple `ExpireBuildArtifactsWorker` cron jobs, scheduled every 7 minutes,
+ # from running concurrently.
+ def execute
+ in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
+ destroy_job_artifacts_with_slow_iteration(Time.current)
+ end
+
+ @removed_artifacts_count
+ end
+
+ private
+
+ def destroy_job_artifacts_with_slow_iteration(start_at)
+ Ci::JobArtifact.expired_before(start_at).each_batch(of: BATCH_SIZE, column: :expire_at, order: :desc) do |relation, index|
+ # For performance reasons, join with ci_pipelines after the batch is queried.
+ # See: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47496
+ artifacts = relation.unlocked
+
+ service_response = destroy_batch_async(artifacts)
+ @removed_artifacts_count += service_response[:destroyed_artifacts_count]
+
+ break if loop_timeout?(start_at)
+ break if index >= LOOP_LIMIT
+ end
+ end
+
+ def destroy_batch_async(artifacts)
+ Ci::JobArtifacts::DestroyBatchService.new(artifacts).execute
+ end
+
+ def loop_timeout?(start_at)
+ Time.current > start_at + LOOP_TIMEOUT
+ end
+ end
+ end
+end
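A minimal sketch of how the cron worker named in the comment above would drive this service (the call shape is an assumption):

    # Returns the number of expired artifact records handed off for deletion:
    removed_count = Ci::JobArtifacts::DestroyAllExpiredService.new.execute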
diff --git a/app/services/ci/job_artifacts/destroy_batch_service.rb b/app/services/ci/job_artifacts/destroy_batch_service.rb
new file mode 100644
index 00000000000..95315dd11ec
--- /dev/null
+++ b/app/services/ci/job_artifacts/destroy_batch_service.rb
@@ -0,0 +1,74 @@
+# frozen_string_literal: true
+
+module Ci
+ module JobArtifacts
+ class DestroyBatchService
+ include BaseServiceUtility
+ include ::Gitlab::Utils::StrongMemoize
+
+ # Danger: Private - Should only be called in Ci Services that pass a batch of job artifacts
+ # Not for use outside of the Ci:: namespace
+
+ # Adds the passed batch of job artifacts to the `ci_deleted_objects` table
+ # for asynchronous destruction of the objects in Object Storage via the `Ci::DeleteObjectsService`
+ # and then deletes the batch of related `ci_job_artifacts` records.
+ # Params:
+ # +job_artifacts+:: A relation of job artifacts to destroy (fewer than MAX_JOB_ARTIFACT_BATCH_SIZE)
+ # +pick_up_at+:: When the files should be picked up for deletion
+ # Returns:
+ # +Hash+:: A hash with status and destroyed_artifacts_count keys
+ def initialize(job_artifacts, pick_up_at: nil)
+ @job_artifacts = job_artifacts.with_destroy_preloads.to_a
+ @pick_up_at = pick_up_at
+ end
+
+ # rubocop: disable CodeReuse/ActiveRecord
+ def execute
+ return success(destroyed_artifacts_count: artifacts_count) if @job_artifacts.empty?
+
+ Ci::DeletedObject.transaction do
+ Ci::DeletedObject.bulk_import(@job_artifacts, @pick_up_at)
+ Ci::JobArtifact.id_in(@job_artifacts.map(&:id)).delete_all
+ destroy_related_records(@job_artifacts)
+ end
+
+ # This is executed outside of the transaction because it depends on Redis
+ update_project_statistics
+ increment_monitoring_statistics(artifacts_count)
+
+ success(destroyed_artifacts_count: artifacts_count)
+ end
+ # rubocop: enable CodeReuse/ActiveRecord
+
+ private
+
+ # This method is implemented in EE and must only perform database work
+ def destroy_related_records(artifacts); end
+
+ def update_project_statistics
+ artifacts_by_project = @job_artifacts.group_by(&:project)
+ artifacts_by_project.each do |project, artifacts|
+ delta = -artifacts.sum { |artifact| artifact.size.to_i }
+ ProjectStatistics.increment_statistic(
+ project, Ci::JobArtifact.project_statistics_name, delta)
+ end
+ end
+
+ def increment_monitoring_statistics(size)
+ metrics.increment_destroyed_artifacts(size)
+ end
+
+ def metrics
+ @metrics ||= ::Gitlab::Ci::Artifacts::Metrics.new
+ end
+
+ def artifacts_count
+ strong_memoize(:artifacts_count) do
+ @job_artifacts.count
+ end
+ end
+ end
+ end
+end
+
+Ci::JobArtifacts::DestroyBatchService.prepend_if_ee('EE::Ci::JobArtifacts::DestroyBatchService')
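A minimal usage sketch, assuming artifacts is an unlocked Ci::JobArtifact relation small enough for a single batch:

    response = Ci::JobArtifacts::DestroyBatchService
      .new(artifacts, pick_up_at: 1.hour.from_now)
      .execute
    response[:destroyed_artifacts_count] # => Integer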
diff --git a/app/services/ci/job_artifacts_destroy_batch_service.rb b/app/services/ci/job_artifacts_destroy_batch_service.rb
deleted file mode 100644
index f8ece27fe86..00000000000
--- a/app/services/ci/job_artifacts_destroy_batch_service.rb
+++ /dev/null
@@ -1,72 +0,0 @@
-# frozen_string_literal: true
-
-module Ci
- class JobArtifactsDestroyBatchService
- include BaseServiceUtility
- include ::Gitlab::Utils::StrongMemoize
-
- # Danger: Private - Should only be called in Ci Services that pass a batch of job artifacts
- # Not for use outsie of the ci namespace
-
- # Adds the passed batch of job artifacts to the `ci_deleted_objects` table
- # for asyncronous destruction of the objects in Object Storage via the `Ci::DeleteObjectsService`
- # and then deletes the batch of related `ci_job_artifacts` records.
- # Params:
- # +job_artifacts+:: A relation of job artifacts to destroy (fewer than MAX_JOB_ARTIFACT_BATCH_SIZE)
- # +pick_up_at+:: When to pick up for deletion of files
- # Returns:
- # +Hash+:: A hash with status and destroyed_artifacts_count keys
- def initialize(job_artifacts, pick_up_at: nil)
- @job_artifacts = job_artifacts.with_destroy_preloads.to_a
- @pick_up_at = pick_up_at
- end
-
- # rubocop: disable CodeReuse/ActiveRecord
- def execute
- return success(destroyed_artifacts_count: artifacts_count) if @job_artifacts.empty?
-
- Ci::DeletedObject.transaction do
- Ci::DeletedObject.bulk_import(@job_artifacts, @pick_up_at)
- Ci::JobArtifact.id_in(@job_artifacts.map(&:id)).delete_all
- destroy_related_records(@job_artifacts)
- end
-
- # This is executed outside of the transaction because it depends on Redis
- update_project_statistics
- increment_monitoring_statistics(artifacts_count)
-
- success(destroyed_artifacts_count: artifacts_count)
- end
- # rubocop: enable CodeReuse/ActiveRecord
-
- private
-
- # This method is implemented in EE and it must do only database work
- def destroy_related_records(artifacts); end
-
- def update_project_statistics
- artifacts_by_project = @job_artifacts.group_by(&:project)
- artifacts_by_project.each do |project, artifacts|
- delta = -artifacts.sum { |artifact| artifact.size.to_i }
- ProjectStatistics.increment_statistic(
- project, Ci::JobArtifact.project_statistics_name, delta)
- end
- end
-
- def increment_monitoring_statistics(size)
- metrics.increment_destroyed_artifacts(size)
- end
-
- def metrics
- @metrics ||= ::Gitlab::Ci::Artifacts::Metrics.new
- end
-
- def artifacts_count
- strong_memoize(:artifacts_count) do
- @job_artifacts.count
- end
- end
- end
-end
-
-Ci::JobArtifactsDestroyBatchService.prepend_if_ee('EE::Ci::JobArtifactsDestroyBatchService')
diff --git a/app/services/ci/pipeline_artifacts/destroy_expired_artifacts_service.rb b/app/services/ci/pipeline_artifacts/destroy_all_expired_service.rb
index 0dbabe178da..fed40aef697 100644
--- a/app/services/ci/pipeline_artifacts/destroy_expired_artifacts_service.rb
+++ b/app/services/ci/pipeline_artifacts/destroy_all_expired_service.rb
@@ -2,7 +2,7 @@
module Ci
module PipelineArtifacts
- class DestroyExpiredArtifactsService
+ class DestroyAllExpiredService
include ::Gitlab::LoopHelpers
include ::Gitlab::Utils::StrongMemoize
diff --git a/app/services/ci/pipeline_processing/atomic_processing_service/status_collection.rb b/app/services/ci/pipeline_processing/atomic_processing_service/status_collection.rb
index 35818e2cf3d..883a70c9795 100644
--- a/app/services/ci/pipeline_processing/atomic_processing_service/status_collection.rb
+++ b/app/services/ci/pipeline_processing/atomic_processing_service/status_collection.rb
@@ -91,17 +91,17 @@ module Ci
def all_statuses_by_id
strong_memoize(:all_statuses_by_id) do
- all_statuses.map do |row|
+ all_statuses.to_h do |row|
[row[:id], row]
- end.to_h
+ end
end
end
def all_statuses_by_name
strong_memoize(:statuses_by_name) do
- all_statuses.map do |row|
+ all_statuses.to_h do |row|
[row[:name], row]
- end.to_h
+ end
end
end
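The refactor above uses the block form of Enumerable#to_h (Ruby 2.6+), which builds the hash directly instead of materializing an intermediate array of pairs:

    [{ id: 1, name: 'rspec' }].to_h { |row| [row[:id], row] }
    # => { 1 => { id: 1, name: 'rspec' } }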
diff --git a/app/services/ci/pipeline_trigger_service.rb b/app/services/ci/pipeline_trigger_service.rb
index dbbaefb2b2f..a5f70d62e13 100644
--- a/app/services/ci/pipeline_trigger_service.rb
+++ b/app/services/ci/pipeline_trigger_service.rb
@@ -6,8 +6,10 @@ module Ci
def execute
if trigger_from_token
+ set_application_context_from_trigger(trigger_from_token)
create_pipeline_from_trigger(trigger_from_token)
elsif job_from_token
+ set_application_context_from_job(job_from_token)
create_pipeline_from_job(job_from_token)
end
@@ -73,11 +75,7 @@ module Ci
end
def variables
- if ::Feature.enabled?(:ci_trigger_payload_into_pipeline, project, default_enabled: :yaml)
- param_variables + [payload_variable]
- else
- param_variables
- end
+ param_variables + [payload_variable]
end
def param_variables
@@ -91,5 +89,20 @@ module Ci
value: params.except(*PAYLOAD_VARIABLE_HIDDEN_PARAMS).to_json,
variable_type: :file }
end
+
+ def set_application_context_from_trigger(trigger)
+ Gitlab::ApplicationContext.push(
+ user: trigger.owner,
+ project: trigger.project
+ )
+ end
+
+ def set_application_context_from_job(job)
+ Gitlab::ApplicationContext.push(
+ user: job.user,
+ project: job.project,
+ runner: job.runner
+ )
+ end
end
end
diff --git a/app/services/ci/play_bridge_service.rb b/app/services/ci/play_bridge_service.rb
index 70c4a8e6136..c5b19a3963a 100644
--- a/app/services/ci/play_bridge_service.rb
+++ b/app/services/ci/play_bridge_service.rb
@@ -8,6 +8,10 @@ module Ci
bridge.tap do |bridge|
bridge.user = current_user
bridge.enqueue!
+
+ next unless ::Feature.enabled?(:ci_fix_pipeline_status_for_dag_needs_manual, project, default_enabled: :yaml)
+
+ AfterRequeueJobService.new(project, current_user).execute(bridge)
end
end
end
diff --git a/app/services/ci/play_build_service.rb b/app/services/ci/play_build_service.rb
index ebc980a9053..4953b1ea5fc 100644
--- a/app/services/ci/play_build_service.rb
+++ b/app/services/ci/play_build_service.rb
@@ -12,7 +12,13 @@ module Ci
# Try to enqueue the build, otherwise create a duplicate.
#
if build.enqueue
- build.tap { |action| action.update(user: current_user, job_variables_attributes: job_variables_attributes || []) }
+ build.tap do |build|
+ build.update(user: current_user, job_variables_attributes: job_variables_attributes || [])
+
+ next unless ::Feature.enabled?(:ci_fix_pipeline_status_for_dag_needs_manual, project, default_enabled: :yaml)
+
+ AfterRequeueJobService.new(project, current_user).execute(build)
+ end
else
Ci::Build.retry(build, current_user)
end
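Both play services gate the requeue fix behind the same flag; toggling it per project from a Rails console is standard Feature API usage (shown for illustration, not part of this diff):

    Feature.enable(:ci_fix_pipeline_status_for_dag_needs_manual, project)
    Feature.disable(:ci_fix_pipeline_status_for_dag_needs_manual, project)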
diff --git a/app/services/ci/process_build_service.rb b/app/services/ci/process_build_service.rb
index 733aa75f255..73cf3308fe7 100644
--- a/app/services/ci/process_build_service.rb
+++ b/app/services/ci/process_build_service.rb
@@ -26,14 +26,6 @@ module Ci
end
def valid_statuses_for_build(build)
- if ::Feature.enabled?(:skip_dag_manual_and_delayed_jobs, build.project, default_enabled: :yaml)
- current_valid_statuses_for_build(build)
- else
- legacy_valid_statuses_for_build(build)
- end
- end
-
- def current_valid_statuses_for_build(build)
case build.when
when 'on_success', 'manual', 'delayed'
build.scheduling_type_dag? ? %w[success] : %w[success skipped]
@@ -45,23 +37,6 @@ module Ci
[]
end
end
-
- def legacy_valid_statuses_for_build(build)
- case build.when
- when 'on_success'
- build.scheduling_type_dag? ? %w[success] : %w[success skipped]
- when 'on_failure'
- %w[failed]
- when 'always'
- %w[success failed skipped]
- when 'manual'
- %w[success skipped]
- when 'delayed'
- %w[success skipped]
- else
- []
- end
- end
end
end
diff --git a/app/services/ci/process_pipeline_service.rb b/app/services/ci/process_pipeline_service.rb
index 970652b4da3..6c69df0c616 100644
--- a/app/services/ci/process_pipeline_service.rb
+++ b/app/services/ci/process_pipeline_service.rb
@@ -19,7 +19,7 @@ module Ci
end
def metrics
- @metrics ||= ::Gitlab::Ci::Pipeline::Metrics.new
+ @metrics ||= ::Gitlab::Ci::Pipeline::Metrics
end
private
diff --git a/app/services/ci/register_job_service.rb b/app/services/ci/register_job_service.rb
index ed9e44d60f1..90341b26fd6 100644
--- a/app/services/ci/register_job_service.rb
+++ b/app/services/ci/register_job_service.rb
@@ -10,7 +10,11 @@ module Ci
Result = Struct.new(:build, :build_json, :valid?)
- MAX_QUEUE_DEPTH = 50
+ ##
+ # The queue depth limit was determined by observing the 95th percentile of
+ # effective queue depth on gitlab.com, so it is only likely to affect the
+ # worst 5% of cases.
+ MAX_QUEUE_DEPTH = 45
def initialize(runner)
@runner = runner
@@ -20,7 +24,7 @@ module Ci
def execute(params = {})
@metrics.increment_queue_operation(:queue_attempt)
- @metrics.observe_queue_time do
+ @metrics.observe_queue_time(:process, @runner.runner_type) do
process_queue(params)
end
end
@@ -105,22 +109,30 @@ module Ci
builds = builds.queued_before(params[:job_age].seconds.ago)
end
- if Feature.enabled?(:ci_register_job_service_one_by_one, runner)
- build_ids = builds.pluck(:id)
+ if Feature.enabled?(:ci_register_job_service_one_by_one, runner, default_enabled: true)
+ build_ids = retrieve_queue(-> { builds.pluck(:id) })
- @metrics.observe_queue_size(-> { build_ids.size })
+ @metrics.observe_queue_size(-> { build_ids.size }, @runner.runner_type)
build_ids.each do |build_id|
yield Ci::Build.find(build_id)
end
else
- @metrics.observe_queue_size(-> { builds.to_a.size })
+ builds_array = retrieve_queue(-> { builds.to_a })
- builds.each(&blk)
+ @metrics.observe_queue_size(-> { builds_array.size }, @runner.runner_type)
+
+ builds_array.each(&blk)
end
end
# rubocop: enable CodeReuse/ActiveRecord
+ def retrieve_queue(queue_query_proc)
+ @metrics.observe_queue_time(:retrieve, @runner.runner_type) do
+ queue_query_proc.call
+ end
+ end
+
def process_build(build, params)
unless build.pending?
@metrics.increment_queue_operation(:build_not_pending)
@@ -171,7 +183,7 @@ module Ci
def max_queue_depth
@max_queue_depth ||= begin
- if Feature.enabled?(:gitlab_ci_builds_queue_limit, runner, default_enabled: false)
+ if Feature.enabled?(:gitlab_ci_builds_queue_limit, runner, default_enabled: true)
MAX_QUEUE_DEPTH
else
::Gitlab::Database::MAX_INT_VALUE
@@ -266,7 +278,7 @@ module Ci
# Workaround for a weird Rails bug that makes `runner.groups.to_sql` return `runner_id = NULL`
groups = ::Group.joins(:runner_namespaces).merge(runner.runner_namespaces)
- hierarchy_groups = Gitlab::ObjectHierarchy.new(groups).base_and_descendants
+ hierarchy_groups = Gitlab::ObjectHierarchy.new(groups, options: { use_distinct: Feature.enabled?(:use_distinct_in_register_job_object_hierarchy) }).base_and_descendants
projects = Project.where(namespace_id: hierarchy_groups)
.with_group_runners_enabled
.with_builds_enabled
diff --git a/app/services/ci/retry_build_service.rb b/app/services/ci/retry_build_service.rb
index b2c5249a0c7..e3de7f43fda 100644
--- a/app/services/ci/retry_build_service.rb
+++ b/app/services/ci/retry_build_service.rb
@@ -2,8 +2,6 @@
module Ci
class RetryBuildService < ::BaseService
- include Gitlab::OptimisticLocking
-
def self.clone_accessors
%i[pipeline project ref tag options name
allow_failure stage stage_id stage_idx trigger_request
@@ -16,12 +14,10 @@ module Ci
build.ensure_scheduling_type!
reprocess!(build).tap do |new_build|
- mark_subsequent_stages_as_processable(build)
- build.pipeline.reset_ancestor_bridges!
-
Gitlab::OptimisticLocking.retry_lock(new_build, name: 'retry_build', &:enqueue)
+ AfterRequeueJobService.new(project, current_user).execute(build)
- MergeRequests::AddTodoWhenBuildFailsService
+ ::MergeRequests::AddTodoWhenBuildFailsService
.new(project, current_user)
.close(new_build)
end
@@ -33,9 +29,9 @@ module Ci
raise Gitlab::Access::AccessDeniedError
end
- attributes = self.class.clone_accessors.map do |attribute|
+ attributes = self.class.clone_accessors.to_h do |attribute|
[attribute, build.public_send(attribute)] # rubocop:disable GitlabSecurity/PublicSend
- end.to_h
+ end
attributes[:user] = current_user
@@ -65,12 +61,6 @@ module Ci
end
build
end
-
- def mark_subsequent_stages_as_processable(build)
- build.pipeline.processables.skipped.after_stage(build.stage_idx).find_each do |skipped|
- retry_optimistic_lock(skipped, name: 'ci_retry_build_mark_subsequent_stages') { |build| build.process(current_user) }
- end
- end
end
end
diff --git a/app/services/ci/retry_pipeline_service.rb b/app/services/ci/retry_pipeline_service.rb
index 90ee7b9b3ba..bb8590a769c 100644
--- a/app/services/ci/retry_pipeline_service.rb
+++ b/app/services/ci/retry_pipeline_service.rb
@@ -28,7 +28,7 @@ module Ci
pipeline.reset_ancestor_bridges!
- MergeRequests::AddTodoWhenBuildFailsService
+ ::MergeRequests::AddTodoWhenBuildFailsService
.new(project, current_user)
.close_all(pipeline)
diff --git a/app/services/ci/stop_environments_service.rb b/app/services/ci/stop_environments_service.rb
index b6c5b398cb1..81457130fa0 100644
--- a/app/services/ci/stop_environments_service.rb
+++ b/app/services/ci/stop_environments_service.rb
@@ -35,7 +35,7 @@ module Ci
private
def environments
- @environments ||= EnvironmentsFinder
+ @environments ||= EnvironmentsByDeploymentsFinder
.new(project, current_user, ref: @ref, recently_updated: true)
.execute
end
diff --git a/app/services/ci/test_failure_history_service.rb b/app/services/ci/test_failure_history_service.rb
index 61fda79a4a2..58bbc716ff0 100644
--- a/app/services/ci/test_failure_history_service.rb
+++ b/app/services/ci/test_failure_history_service.rb
@@ -34,7 +34,7 @@ module Ci
# We fetch for up to MAX_TRACKABLE_FAILURES + 1 builds. So if ever we get
# 201 total number of builds with the assumption that each job has at least
- # 1 failed test case, then we have at least 201 failed test cases which exceeds
+ # 1 failed unit test, then we have at least 201 failed unit tests which exceeds
# the MAX_TRACKABLE_FAILURES of 200. If this is the case, let's early exit so we
# don't have to parse each JUnit report of each of the 201 builds.
failed_builds.length <= MAX_TRACKABLE_FAILURES
@@ -51,25 +51,29 @@ module Ci
end
def track_failures
- failed_test_cases = gather_failed_test_cases(failed_builds)
+ failed_unit_tests = gather_failed_unit_tests_from_reports(failed_builds)
- return if failed_test_cases.size > MAX_TRACKABLE_FAILURES
+ return if failed_unit_tests.size > MAX_TRACKABLE_FAILURES
- failed_test_cases.keys.each_slice(100) do |key_hashes|
- Ci::TestCase.transaction do
- ci_test_cases = Ci::TestCase.find_or_create_by_batch(project, key_hashes)
- failures = test_case_failures(ci_test_cases, failed_test_cases)
+ failed_unit_tests.each_slice(100) do |batch|
+ Ci::UnitTest.transaction do
+ unit_test_attrs = ci_unit_test_attrs(batch)
+ ci_unit_tests = Ci::UnitTest.find_or_create_by_batch(project, unit_test_attrs)
- Ci::TestCaseFailure.insert_all(failures)
+ failures = ci_unit_test_failure_attrs(ci_unit_tests, failed_unit_tests)
+ Ci::UnitTestFailure.insert_all(failures)
end
end
end
- def gather_failed_test_cases(failed_builds)
- failed_builds.each_with_object({}) do |build, failed_test_cases|
+ def gather_failed_unit_tests_from_reports(failed_builds)
+ failed_builds.each_with_object({}) do |build, failed_unit_tests|
test_suite = generate_test_suite!(build)
- test_suite.failed.keys.each do |key|
- failed_test_cases[key] = build
+ test_suite.failed.each do |key, unit_test|
+ failed_unit_tests[key] = {
+ build: build, # This will be used in ci_unit_test_failure_attrs
+ unit_test: unit_test # This will be used in ci_unit_test_attrs
+ }
end
end
end
@@ -79,12 +83,24 @@ module Ci
build.collect_test_reports!(Gitlab::Ci::Reports::TestReports.new)
end
- def test_case_failures(ci_test_cases, failed_test_cases)
- ci_test_cases.map do |test_case|
- build = failed_test_cases[test_case.key_hash]
+ def ci_unit_test_attrs(batch)
+ batch.map do |item|
+ unit_test = item.last[:unit_test]
{
- test_case_id: test_case.id,
+ key_hash: unit_test.key,
+ name: unit_test.name,
+ suite_name: unit_test.suite_name
+ }
+ end
+ end
+
+ def ci_unit_test_failure_attrs(ci_unit_tests, failed_unit_tests)
+ ci_unit_tests.map do |ci_unit_test|
+ build = failed_unit_tests[ci_unit_test.key_hash][:build]
+
+ {
+ unit_test_id: ci_unit_test.id,
build_id: build.id,
failed_at: build.finished_at
}
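The intermediate structures above are easier to follow as data; an illustrative shape (values invented):

    # Built by gather_failed_unit_tests_from_reports, keyed by unit test key hash:
    #   { 'd54f...' => { build: <Ci::Build>, unit_test: <test case from the JUnit report> } }
    #
    # Derived by ci_unit_test_attrs for each slice of 100:
    #   [{ key_hash: 'd54f...', name: 'spec name', suite_name: 'rspec' }]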