-rw-r--r--  GITALY_SERVER_VERSION | 2
-rw-r--r--  app/controllers/clusters/clusters_controller.rb | 6
-rw-r--r--  app/controllers/concerns/notes_actions.rb | 6
-rw-r--r--  app/controllers/projects/error_tracking_controller.rb | 5
-rw-r--r--  app/helpers/projects/error_tracking_helper.rb | 8
-rw-r--r--  app/models/clusters/cluster.rb | 11
-rw-r--r--  app/serializers/merge_request_poll_cached_widget_entity.rb | 2
-rw-r--r--  app/services/clusters/cleanup/app_service.rb | 33
-rw-r--r--  app/services/clusters/cleanup/base_service.rb | 43
-rw-r--r--  app/services/clusters/cleanup/project_namespace_service.rb | 44
-rw-r--r--  app/services/clusters/cleanup/service_account_service.rb | 27
-rw-r--r--  app/views/clusters/clusters/aws/_new.html.haml | 5
-rw-r--r--  app/views/projects/error_tracking/details.html.haml | 2
-rw-r--r--  app/workers/all_queues.yml | 3
-rw-r--r--  app/workers/clusters/cleanup/app_worker.rb | 15
-rw-r--r--  app/workers/clusters/cleanup/project_namespace_worker.rb | 15
-rw-r--r--  app/workers/clusters/cleanup/service_account_worker.rb | 13
-rw-r--r--  app/workers/concerns/cluster_cleanup_methods.rb | 75
-rw-r--r--  changelogs/unreleased/34406-link-to-merge-trains-documentation-on-mr-widget-is-incorrect.yml | 5
-rw-r--r--  changelogs/unreleased/35365-bugfix-gma-group-members-cleanup.yml | 5
-rw-r--r--  changelogs/unreleased/feature-advanced-delete-k8s-resources-ee.yml | 5
-rw-r--r--  changelogs/unreleased/gitaly-version-v1.72.0.yml | 5
-rw-r--r--  changelogs/unreleased/http-status-422.yml | 5
-rw-r--r--  doc/ci/review_apps/index.md | 2
-rw-r--r--  doc/user/group/epics/index.md | 23
-rw-r--r--  doc/user/project/clusters/add_remove_clusters.md | 98
-rw-r--r--  doc/user/project/issues/issue_data_and_actions.md | 2
-rw-r--r--  qa/qa/specs/features/browser_ui/7_configure/auto_devops/create_project_with_auto_devops_spec.rb | 3
-rw-r--r--  spec/controllers/projects/notes_controller_spec.rb | 11
-rw-r--r--  spec/factories/clusters/clusters.rb | 22
-rw-r--r--  spec/helpers/projects/error_tracking_helper_spec.rb | 17
-rw-r--r--  spec/models/clusters/cluster_spec.rb | 42
-rw-r--r--  spec/services/clusters/cleanup/app_service_spec.rb | 117
-rw-r--r--  spec/services/clusters/cleanup/project_namespace_service_spec.rb | 85
-rw-r--r--  spec/services/clusters/cleanup/service_account_service_spec.rb | 47
-rw-r--r--  spec/services/clusters/destroy_service_spec.rb | 2
-rw-r--r--  spec/support/shared_examples/models/cluster_cleanup_worker_base_shared_examples.rb | 11
-rw-r--r--  spec/workers/clusters/cleanup/app_worker_spec.rb | 41
-rw-r--r--  spec/workers/clusters/cleanup/project_namespace_worker_spec.rb | 52
-rw-r--r--  spec/workers/clusters/cleanup/service_account_worker_spec.rb | 27
40 files changed, 846 insertions, 96 deletions
diff --git a/GITALY_SERVER_VERSION b/GITALY_SERVER_VERSION
index df484cbb1d9..0834888f558 100644
--- a/GITALY_SERVER_VERSION
+++ b/GITALY_SERVER_VERSION
@@ -1 +1 @@
-1.71.0
+1.72.0
diff --git a/app/controllers/clusters/clusters_controller.rb b/app/controllers/clusters/clusters_controller.rb
index 9a539cf7c24..673ead04709 100644
--- a/app/controllers/clusters/clusters_controller.rb
+++ b/app/controllers/clusters/clusters_controller.rb
@@ -172,11 +172,7 @@ class Clusters::ClustersController < Clusters::BaseController
private
def destroy_params
- # To be uncomented on https://gitlab.com/gitlab-org/gitlab/merge_requests/16954
- # This MR got split into other since it was too big.
- #
- # params.permit(:cleanup)
- {}
+ params.permit(:cleanup)
end
def update_params
diff --git a/app/controllers/concerns/notes_actions.rb b/app/controllers/concerns/notes_actions.rb
index fbae4c53c31..3d599d9e7f9 100644
--- a/app/controllers/concerns/notes_actions.rb
+++ b/app/controllers/concerns/notes_actions.rb
@@ -63,7 +63,11 @@ module NotesActions
json.merge!(note_json(@note))
end
- render json: json
+ if @note.errors.present? && @note.errors.keys != [:commands_only]
+ render json: json, status: :unprocessable_entity
+ else
+ render json: json
+ end
end
format.html { redirect_back_or_default }
end
diff --git a/app/controllers/projects/error_tracking_controller.rb b/app/controllers/projects/error_tracking_controller.rb
index 9dea6b663ea..7143424473e 100644
--- a/app/controllers/projects/error_tracking_controller.rb
+++ b/app/controllers/projects/error_tracking_controller.rb
@@ -2,6 +2,7 @@
class Projects::ErrorTrackingController < Projects::ApplicationController
before_action :authorize_read_sentry_issue!
+ before_action :set_issue_id, only: [:details, :stack_trace]
POLLING_INTERVAL = 10_000
@@ -113,6 +114,10 @@ class Projects::ErrorTrackingController < Projects::ApplicationController
params.permit(:issue_id)
end
+ def set_issue_id
+ @issue_id = issue_details_params[:issue_id]
+ end
+
def set_polling_interval
Gitlab::PollingInterval.set_header(response, interval: POLLING_INTERVAL)
end
diff --git a/app/helpers/projects/error_tracking_helper.rb b/app/helpers/projects/error_tracking_helper.rb
index 2f5f612ed4c..c31e16e7150 100644
--- a/app/helpers/projects/error_tracking_helper.rb
+++ b/app/helpers/projects/error_tracking_helper.rb
@@ -14,12 +14,12 @@ module Projects::ErrorTrackingHelper
}
end
- def error_details_data(project, issue)
- opts = [project, issue, { format: :json }]
+ def error_details_data(project, issue_id)
+ opts = [project, issue_id, { format: :json }]
{
- 'issue-details-path' => details_namespace_project_error_tracking_index_path(*opts),
- 'issue-stack-trace-path' => stack_trace_namespace_project_error_tracking_index_path(*opts)
+ 'issue-details-path' => details_project_error_tracking_index_path(*opts),
+ 'issue-stack-trace-path' => stack_trace_project_error_tracking_index_path(*opts)
}
end
end
diff --git a/app/models/clusters/cluster.rb b/app/models/clusters/cluster.rb
index f522f3f2fdb..98e754a1370 100644
--- a/app/models/clusters/cluster.rb
+++ b/app/models/clusters/cluster.rb
@@ -23,6 +23,7 @@ module Clusters
}.freeze
DEFAULT_ENVIRONMENT = '*'
KUBE_INGRESS_BASE_DOMAIN = 'KUBE_INGRESS_BASE_DOMAIN'
+ APPLICATIONS_ASSOCIATIONS = APPLICATIONS.values.map(&:association_name).freeze
belongs_to :user
belongs_to :management_project, class_name: '::Project', optional: true
@@ -117,7 +118,7 @@ module Clusters
scope :aws_installed, -> { aws_provided.joins(:provider_aws).merge(Clusters::Providers::Aws.with_status(:created)) }
scope :managed, -> { where(managed: true) }
-
+ scope :with_persisted_applications, -> { eager_load(*APPLICATIONS_ASSOCIATIONS) }
scope :default_environment, -> { where(environment_scope: DEFAULT_ENVIRONMENT) }
scope :for_project_namespace, -> (namespace_id) { joins(:projects).where(projects: { namespace_id: namespace_id }) }
@@ -195,9 +196,13 @@ module Clusters
{ connection_status: retrieve_connection_status }
end
+ def persisted_applications
+ APPLICATIONS_ASSOCIATIONS.map(&method(:public_send)).compact
+ end
+
def applications
- APPLICATIONS.values.map do |application_class|
- public_send(application_class.association_name) || public_send("build_#{application_class.association_name}") # rubocop:disable GitlabSecurity/PublicSend
+ APPLICATIONS_ASSOCIATIONS.map do |association_name|
+ public_send(association_name) || public_send("build_#{association_name}") # rubocop:disable GitlabSecurity/PublicSend
end
end
diff --git a/app/serializers/merge_request_poll_cached_widget_entity.rb b/app/serializers/merge_request_poll_cached_widget_entity.rb
index a3186ecbcdf..a7ff74fb888 100644
--- a/app/serializers/merge_request_poll_cached_widget_entity.rb
+++ b/app/serializers/merge_request_poll_cached_widget_entity.rb
@@ -101,3 +101,5 @@ class MergeRequestPollCachedWidgetEntity < IssuableEntity
merged_by: merge_event&.author)
end
end
+
+MergeRequestPollCachedWidgetEntity.prepend_if_ee('EE::MergeRequestPollCachedWidgetEntity')
diff --git a/app/services/clusters/cleanup/app_service.rb b/app/services/clusters/cleanup/app_service.rb
new file mode 100644
index 00000000000..a7e29c78ea0
--- /dev/null
+++ b/app/services/clusters/cleanup/app_service.rb
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Cleanup
+ class AppService < Clusters::Cleanup::BaseService
+ def execute
+ persisted_applications = @cluster.persisted_applications
+
+ persisted_applications.each do |app|
+ next unless app.available?
+ next unless app.can_uninstall?
+
+ log_event(:uninstalling_app, application: app.class.application_name)
+ uninstall_app_async(app)
+ end
+
+ # Keep calling the worker until all dependencies are uninstalled
+ return schedule_next_execution(Clusters::Cleanup::AppWorker) if persisted_applications.any?
+
+ log_event(:schedule_remove_project_namespaces)
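+ # All applications are gone; advance cleanup_status so project namespace removal can start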
+ cluster.continue_cleanup!
+ end
+
+ private
+
+ def uninstall_app_async(application)
+ application.make_scheduled!
+
+ Clusters::Applications::UninstallWorker.perform_async(application.name, application.id)
+ end
+ end
+ end
+end
diff --git a/app/services/clusters/cleanup/base_service.rb b/app/services/clusters/cleanup/base_service.rb
new file mode 100644
index 00000000000..f99e54cfc40
--- /dev/null
+++ b/app/services/clusters/cleanup/base_service.rb
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Cleanup
+ class BaseService
+ DEFAULT_EXECUTION_INTERVAL = 1.minute
+
+ def initialize(cluster, execution_count = 0)
+ @cluster = cluster
+ @execution_count = execution_count
+ end
+
+ private
+
+ attr_reader :cluster
+
+ def logger
+ @logger ||= Gitlab::Kubernetes::Logger.build
+ end
+
+ def log_event(event, extra_data = {})
+ meta = {
+ service: self.class.name,
+ cluster_id: cluster.id,
+ execution_count: @execution_count,
+ event: event
+ }
+
+ logger.info(meta.merge(extra_data))
+ end
+
+ def schedule_next_execution(worker_class)
+ log_event(:scheduling_execution, next_execution: @execution_count + 1)
+ worker_class.perform_in(execution_interval, cluster.id, @execution_count + 1)
+ end
+
+ # Override this method to customize the execution interval
+ def execution_interval
+ DEFAULT_EXECUTION_INTERVAL
+ end
+ end
+ end
+end
diff --git a/app/services/clusters/cleanup/project_namespace_service.rb b/app/services/clusters/cleanup/project_namespace_service.rb
new file mode 100644
index 00000000000..7621be565ff
--- /dev/null
+++ b/app/services/clusters/cleanup/project_namespace_service.rb
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Cleanup
+ class ProjectNamespaceService < BaseService
+ KUBERNETES_NAMESPACE_BATCH_SIZE = 100
+
+ def execute
+ delete_project_namespaces_in_batches
+
+ # Keep calling the worker until all namespaces are deleted
+ if cluster.kubernetes_namespaces.exists?
+ return schedule_next_execution(Clusters::Cleanup::ProjectNamespaceWorker)
+ end
+
+ cluster.continue_cleanup!
+ end
+
+ private
+
+ def delete_project_namespaces_in_batches
+ kubernetes_namespaces_batch = cluster.kubernetes_namespaces.first(KUBERNETES_NAMESPACE_BATCH_SIZE)
+
+ kubernetes_namespaces_batch.each do |kubernetes_namespace|
+ log_event(:deleting_project_namespace, namespace: kubernetes_namespace.namespace)
+
+ begin
+ kubeclient_delete_namespace(kubernetes_namespace)
+ rescue Kubeclient::HttpError
+ next
+ end
+
+ kubernetes_namespace.destroy!
+ end
+ end
+
+ def kubeclient_delete_namespace(kubernetes_namespace)
+ cluster.kubeclient.delete_namespace(kubernetes_namespace.namespace)
+ rescue Kubeclient::ResourceNotFoundError
+ # no-op: nothing to delete
+ end
+ end
+ end
+end
diff --git a/app/services/clusters/cleanup/service_account_service.rb b/app/services/clusters/cleanup/service_account_service.rb
new file mode 100644
index 00000000000..d60bd76d388
--- /dev/null
+++ b/app/services/clusters/cleanup/service_account_service.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Cleanup
+ class ServiceAccountService < BaseService
+ def execute
+ delete_gitlab_service_account
+
+ log_event(:destroying_cluster)
+
+ cluster.destroy!
+ end
+
+ private
+
+ def delete_gitlab_service_account
+ log_event(:deleting_gitlab_service_account)
+
+ cluster.kubeclient.delete_service_account(
+ ::Clusters::Kubernetes::GITLAB_SERVICE_ACCOUNT_NAME,
+ ::Clusters::Kubernetes::GITLAB_SERVICE_ACCOUNT_NAMESPACE
+ )
+ rescue Kubeclient::ResourceNotFoundError
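+ # no-op: the service account has already been deleted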
+ end
+ end
+ end
+end
diff --git a/app/views/clusters/clusters/aws/_new.html.haml b/app/views/clusters/clusters/aws/_new.html.haml
index 48467f88f52..795b80bfb6f 100644
--- a/app/views/clusters/clusters/aws/_new.html.haml
+++ b/app/views/clusters/clusters/aws/_new.html.haml
@@ -1,5 +1,6 @@
- if !Gitlab::CurrentSettings.eks_integration_enabled?
- - documentation_link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: help_page_path("integration/amazon") }
+ - documentation_link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: help_page_path('user/project/clusters/add_remove_clusters.md',
+ anchor: 'additional-requirements-for-self-managed-instances') }
= s_('Amazon authentication is not %{link_start}correctly configured%{link_end}. Ask your GitLab administrator if you want to use this service.').html_safe % { link_start: documentation_link_start, link_end: '<a/>'.html_safe }
- else
.js-create-eks-cluster-form-container{ data: { 'gitlab-managed-cluster-help-path' => help_page_path('user/project/clusters/index.md', anchor: 'gitlab-managed-clusters'),
@@ -16,5 +17,7 @@
'account-id' => Gitlab::CurrentSettings.eks_account_id,
'external-id' => @aws_role.role_external_id,
'kubernetes-integration-help-path' => help_page_path('user/project/clusters/index'),
+ 'account-and-external-ids-help-path' => help_page_path('user/project/clusters/add_remove_clusters.md', anchor: 'eks-cluster'),
+ 'create-role-arn-help-path' => help_page_path('user/project/clusters/add_remove_clusters.md', anchor: 'eks-cluster'),
'external-link-icon' => icon('external-link'),
'has-credentials' => @aws_role.role_arn.present?.to_s } }
diff --git a/app/views/projects/error_tracking/details.html.haml b/app/views/projects/error_tracking/details.html.haml
index 640746ad8f6..7015dcdcb05 100644
--- a/app/views/projects/error_tracking/details.html.haml
+++ b/app/views/projects/error_tracking/details.html.haml
@@ -1,4 +1,4 @@
- page_title _('Error Details')
- add_to_breadcrumbs 'Errors', project_error_tracking_index_path(@project)
-#js-error_details{ data: error_details_data(@current_user, @project) }
+#js-error_details{ data: error_details_data(@project, @issue_id) }
diff --git a/app/workers/all_queues.yml b/app/workers/all_queues.yml
index 66b5214cfcb..710998dcd1a 100644
--- a/app/workers/all_queues.yml
+++ b/app/workers/all_queues.yml
@@ -38,6 +38,9 @@
- gcp_cluster:cluster_patch_app
- gcp_cluster:cluster_upgrade_app
- gcp_cluster:cluster_provision
+- gcp_cluster:clusters_cleanup_app
+- gcp_cluster:clusters_cleanup_project_namespace
+- gcp_cluster:clusters_cleanup_service_account
- gcp_cluster:cluster_wait_for_app_installation
- gcp_cluster:wait_for_cluster_creation
- gcp_cluster:cluster_wait_for_ingress_ip_address
diff --git a/app/workers/clusters/cleanup/app_worker.rb b/app/workers/clusters/cleanup/app_worker.rb
index 1eedf510ba1..8b2fddd3164 100644
--- a/app/workers/clusters/cleanup/app_worker.rb
+++ b/app/workers/clusters/cleanup/app_worker.rb
@@ -3,13 +3,16 @@
module Clusters
module Cleanup
class AppWorker
- include ApplicationWorker
- include ClusterQueue
- include ClusterApplications
+ include ClusterCleanupMethods
- # TODO: Merge with https://gitlab.com/gitlab-org/gitlab/merge_requests/16954
- # We're splitting the above MR in smaller chunks to facilitate reviews
- def perform
+ def perform(cluster_id, execution_count = 0)
+ Clusters::Cluster.with_persisted_applications.find_by_id(cluster_id).try do |cluster|
+ break unless cluster.cleanup_uninstalling_applications?
+
+ break exceeded_execution_limit(cluster) if exceeded_execution_limit?(execution_count)
+
+ ::Clusters::Cleanup::AppService.new(cluster, execution_count).execute
+ end
end
end
end
diff --git a/app/workers/clusters/cleanup/project_namespace_worker.rb b/app/workers/clusters/cleanup/project_namespace_worker.rb
index 09f2abf5d8a..8a7fbf0fde7 100644
--- a/app/workers/clusters/cleanup/project_namespace_worker.rb
+++ b/app/workers/clusters/cleanup/project_namespace_worker.rb
@@ -3,13 +3,16 @@
module Clusters
module Cleanup
class ProjectNamespaceWorker
- include ApplicationWorker
- include ClusterQueue
- include ClusterApplications
+ include ClusterCleanupMethods
- # TODO: Merge with https://gitlab.com/gitlab-org/gitlab/merge_requests/16954
- # We're splitting the above MR in smaller chunks to facilitate reviews
- def perform
+ def perform(cluster_id, execution_count = 0)
+ Clusters::Cluster.find_by_id(cluster_id).try do |cluster|
+ break unless cluster.cleanup_removing_project_namespaces?
+
+ break exceeded_execution_limit(cluster) if exceeded_execution_limit?(execution_count)
+
+ Clusters::Cleanup::ProjectNamespaceService.new(cluster, execution_count).execute
+ end
end
end
end
diff --git a/app/workers/clusters/cleanup/service_account_worker.rb b/app/workers/clusters/cleanup/service_account_worker.rb
index fab6318a807..95de56d8ebe 100644
--- a/app/workers/clusters/cleanup/service_account_worker.rb
+++ b/app/workers/clusters/cleanup/service_account_worker.rb
@@ -3,13 +3,14 @@
module Clusters
module Cleanup
class ServiceAccountWorker
- include ApplicationWorker
- include ClusterQueue
- include ClusterApplications
+ include ClusterCleanupMethods
- # TODO: Merge with https://gitlab.com/gitlab-org/gitlab/merge_requests/16954
- # We're splitting the above MR in smaller chunks to facilitate reviews
- def perform
+ def perform(cluster_id)
+ Clusters::Cluster.find_by_id(cluster_id).try do |cluster|
+ break unless cluster.cleanup_removing_service_account?
+
+ Clusters::Cleanup::ServiceAccountService.new(cluster).execute
+ end
end
end
end
diff --git a/app/workers/concerns/cluster_cleanup_methods.rb b/app/workers/concerns/cluster_cleanup_methods.rb
new file mode 100644
index 00000000000..04fa4d69666
--- /dev/null
+++ b/app/workers/concerns/cluster_cleanup_methods.rb
@@ -0,0 +1,75 @@
+# frozen_string_literal: true
+
+# Concern providing shared Sidekiq settings, retry handling, and logging for the cluster cleanup workers.
+module ClusterCleanupMethods
+ extend ActiveSupport::Concern
+
+ include ApplicationWorker
+ include ClusterQueue
+
+ DEFAULT_EXECUTION_LIMIT = 10
+ ExceededExecutionLimitError = Class.new(StandardError)
+
+ included do
+ worker_has_external_dependencies!
+
+ sidekiq_options retry: 3
+
+ sidekiq_retries_exhausted do |msg, error|
+ cluster_id = msg['args'][0]
+
+ cluster = Clusters::Cluster.find_by_id(cluster_id)
+
+ cluster.make_cleanup_errored!("#{self.class.name} retried too many times") if cluster
+
+ logger = Gitlab::Kubernetes::Logger.build
+
+ logger.error({
+ exception: error,
+ cluster_id: cluster_id,
+ class_name: msg['class'],
+ event: :sidekiq_retries_exhausted,
+ message: msg['error_message']
+ })
+ end
+ end
+
+ private
+
+ # Override this method to customize the execution_limit
+ def execution_limit
+ DEFAULT_EXECUTION_LIMIT
+ end
+
+ def exceeded_execution_limit?(execution_count)
+ execution_count >= execution_limit
+ end
+
+ def logger
+ @logger ||= Gitlab::Kubernetes::Logger.build
+ end
+
+ def exceeded_execution_limit(cluster)
+ log_exceeded_execution_limit_error(cluster)
+
+ cluster.make_cleanup_errored!("#{self.class.name} exceeded the execution limit")
+ end
+
+ def cluster_applications_and_status(cluster)
+ cluster.persisted_applications
+ .map { |application| "#{application.name}:#{application.status_name}" }
+ .join(",")
+ end
+
+ def log_exceeded_execution_limit_error(cluster)
+ logger.error({
+ exception: ExceededExecutionLimitError.name,
+ cluster_id: cluster.id,
+ class_name: self.class.name,
+ cleanup_status: cluster.cleanup_status_name,
+ applications: cluster_applications_and_status(cluster),
+ event: :failed_to_remove_cluster_and_resources,
+ message: "exceeded execution limit of #{execution_limit} tries"
+ })
+ end
+end
diff --git a/changelogs/unreleased/34406-link-to-merge-trains-documentation-on-mr-widget-is-incorrect.yml b/changelogs/unreleased/34406-link-to-merge-trains-documentation-on-mr-widget-is-incorrect.yml
new file mode 100644
index 00000000000..2d6db8c74b9
--- /dev/null
+++ b/changelogs/unreleased/34406-link-to-merge-trains-documentation-on-mr-widget-is-incorrect.yml
@@ -0,0 +1,5 @@
+---
+title: Correct link to Merge trains documentation on MR widget
+merge_request: 19726
+author:
+type: changed
diff --git a/changelogs/unreleased/35365-bugfix-gma-group-members-cleanup.yml b/changelogs/unreleased/35365-bugfix-gma-group-members-cleanup.yml
new file mode 100644
index 00000000000..73ac5fd65b0
--- /dev/null
+++ b/changelogs/unreleased/35365-bugfix-gma-group-members-cleanup.yml
@@ -0,0 +1,5 @@
+---
+title: Fix group managed accounts members cleanup
+merge_request: 20157
+author:
+type: fixed
diff --git a/changelogs/unreleased/feature-advanced-delete-k8s-resources-ee.yml b/changelogs/unreleased/feature-advanced-delete-k8s-resources-ee.yml
new file mode 100644
index 00000000000..5323a70982c
--- /dev/null
+++ b/changelogs/unreleased/feature-advanced-delete-k8s-resources-ee.yml
@@ -0,0 +1,5 @@
+---
+title: Delete kubernetes cluster association and resources
+merge_request: 16954
+author:
+type: added
diff --git a/changelogs/unreleased/gitaly-version-v1.72.0.yml b/changelogs/unreleased/gitaly-version-v1.72.0.yml
new file mode 100644
index 00000000000..0e007befa4d
--- /dev/null
+++ b/changelogs/unreleased/gitaly-version-v1.72.0.yml
@@ -0,0 +1,5 @@
+---
+title: Upgrade to Gitaly v1.72.0
+merge_request: 20313
+author:
+type: changed
diff --git a/changelogs/unreleased/http-status-422.yml b/changelogs/unreleased/http-status-422.yml
new file mode 100644
index 00000000000..51f189b5e7c
--- /dev/null
+++ b/changelogs/unreleased/http-status-422.yml
@@ -0,0 +1,5 @@
+---
+title: Return 422 status code in case of error in submitting comments
+merge_request: 19276
+author: raju249
+type: added
diff --git a/doc/ci/review_apps/index.md b/doc/ci/review_apps/index.md
index da92fadafc4..b69562faad7 100644
--- a/doc/ci/review_apps/index.md
+++ b/doc/ci/review_apps/index.md
@@ -85,7 +85,7 @@ in your repository map to paths of pages on your website using a Route Map.
Once set, GitLab will display **View on ...** buttons, which will take you
to the pages changed directly from merge requests.
-To set up a route map, add a a file inside the repository at `.gitlab/route-map.yml`,
+To set up a route map, add a file inside the repository at `.gitlab/route-map.yml`,
which contains a YAML array that maps `source` paths (in the repository) to `public`
paths (on the website).
diff --git a/doc/user/group/epics/index.md b/doc/user/group/epics/index.md
index 0753df70bc2..01e277d5559 100644
--- a/doc/user/group/epics/index.md
+++ b/doc/user/group/epics/index.md
@@ -50,14 +50,17 @@ Any issue that belongs to a project in the epic's group, or any of the epic's
subgroups, are eligible to be added. New issues appear at the top of the list of issues in the **Epics and Issues** tab.
An epic contains a list of issues and an issue can be associated with at most
-one epic. When you add an issue to an epic that is already associated with another epic,
-the issue is automatically removed from the previous epic.
+one epic. When you add an issue that is already linked to an epic,
+the issue is automatically unlinked from its current parent.
To add an issue to an epic:
1. Click **Add an issue**.
-1. Paste the link of the issue.
- - Press <kbd>Spacebar</kbd> and repeat this step if there are multiple issues.
+1. Identify the issue to be added, using either of the following methods:
+ - Paste the link of the issue.
+ - Search for the desired issue by entering part of the issue's title, then selecting the desired match. ([From GitLab 12.5](https://gitlab.com/gitlab-org/gitlab/issues/9126))
+
+ If there are multiple issues to be added, press <kbd>Spacebar</kbd> and repeat this step.
1. Click **Add**.
To remove an issue from an epic:
@@ -72,17 +75,19 @@ To remove an issue from an epic:
Any epic that belongs to a group, or subgroup of the parent epic's group, is
eligible to be added. New child epics appear at the top of the list of epics in the **Epics and Issues** tab.
-When you add a child epic that is already associated with another epic,
-that epic is automatically removed from the previous epic.
+When you add an epic that is already linked to a parent epic, the link to its current parent is removed.
An epic can have multiple child epics with
the maximum depth being 5.
-To add a child epic:
+To add a child epic to an epic:
1. Click **Add an epic**.
-1. Paste the link of the epic.
- - Press <kbd>Spacebar</kbd> and repeat this step if there are multiple issues.
+1. Identify the epic to be added, using either of the following methods:
+ - Paste the link of the epic.
+ - Search for the desired epic by entering part of the epic's title, then selecting the desired match. ([From GitLab 12.5](https://gitlab.com/gitlab-org/gitlab/issues/9126))
+
+ If there are multiple epics to be added, press <kbd>Spacebar</kbd> and repeat this step.
1. Click **Add**.
To remove a child epic from a parent epic:
diff --git a/doc/user/project/clusters/add_remove_clusters.md b/doc/user/project/clusters/add_remove_clusters.md
index 150a451dfe5..07ef8bca972 100644
--- a/doc/user/project/clusters/add_remove_clusters.md
+++ b/doc/user/project/clusters/add_remove_clusters.md
@@ -262,55 +262,55 @@ new Kubernetes cluster to your project:
1. Click **Create Policy**, which will open a new window.
1. Select the **JSON** tab, and paste in the following snippet in place of the existing content:
- ```json
- {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Action": [
- "autoscaling:CreateAutoScalingGroup",
- "autoscaling:DescribeAutoScalingGroups",
- "autoscaling:DescribeScalingActivities",
- "autoscaling:UpdateAutoScalingGroup",
- "autoscaling:CreateLaunchConfiguration",
- "autoscaling:DescribeLaunchConfigurations",
- "cloudformation:CreateStack",
- "cloudformation:DescribeStacks",
- "ec2:AuthorizeSecurityGroupEgress",
- "ec2:AuthorizeSecurityGroupIngress",
- "ec2:RevokeSecurityGroupEgress",
- "ec2:RevokeSecurityGroupIngress",
- "ec2:CreateSecurityGroup",
- "ec2:createTags",
- "ec2:DescribeImages",
- "ec2:DescribeKeyPairs",
- "ec2:DescribeRegions",
- "ec2:DescribeSecurityGroups",
- "ec2:DescribeSubnets",
- "ec2:DescribeVpcs",
- "eks:CreateCluster",
- "eks:DescribeCluster",
- "iam:AddRoleToInstanceProfile",
- "iam:AttachRolePolicy",
- "iam:CreateRole",
- "iam:CreateInstanceProfile",
- "iam:GetRole",
- "iam:ListRoles",
- "iam:PassRole",
- "ssm:GetParameters"
- ],
- "Resource": "*"
- }
- ]
- }
- ```
-
- NOTE: **Note:**
- These permissions give GitLab the ability to create resources, but not delete them.
- This means that if an error is encountered during the creation process, changes will
- not be rolled back and you must remove resources manually. You can do this by deleting
- the relevant [CloudFormation stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-console-delete-stack.html)
+ ```json
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "autoscaling:CreateAutoScalingGroup",
+ "autoscaling:DescribeAutoScalingGroups",
+ "autoscaling:DescribeScalingActivities",
+ "autoscaling:UpdateAutoScalingGroup",
+ "autoscaling:CreateLaunchConfiguration",
+ "autoscaling:DescribeLaunchConfigurations",
+ "cloudformation:CreateStack",
+ "cloudformation:DescribeStacks",
+ "ec2:AuthorizeSecurityGroupEgress",
+ "ec2:AuthorizeSecurityGroupIngress",
+ "ec2:RevokeSecurityGroupEgress",
+ "ec2:RevokeSecurityGroupIngress",
+ "ec2:CreateSecurityGroup",
+ "ec2:createTags",
+ "ec2:DescribeImages",
+ "ec2:DescribeKeyPairs",
+ "ec2:DescribeRegions",
+ "ec2:DescribeSecurityGroups",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeVpcs",
+ "eks:CreateCluster",
+ "eks:DescribeCluster",
+ "iam:AddRoleToInstanceProfile",
+ "iam:AttachRolePolicy",
+ "iam:CreateRole",
+ "iam:CreateInstanceProfile",
+ "iam:GetRole",
+ "iam:ListRoles",
+ "iam:PassRole",
+ "ssm:GetParameters"
+ ],
+ "Resource": "*"
+ }
+ ]
+ }
+ ```
+
+ NOTE: **Note:**
+ These permissions give GitLab the ability to create resources, but not delete them.
+ This means that if an error is encountered during the creation process, changes will
+ not be rolled back and you must remove resources manually. You can do this by deleting
+ the relevant [CloudFormation stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-console-delete-stack.html)
1. Click **Review policy**.
1. Enter a suitable name for this policy, and click **Create Policy**. You can now close this window.
diff --git a/doc/user/project/issues/issue_data_and_actions.md b/doc/user/project/issues/issue_data_and_actions.md
index 92da4235afa..18f91352910 100644
--- a/doc/user/project/issues/issue_data_and_actions.md
+++ b/doc/user/project/issues/issue_data_and_actions.md
@@ -157,6 +157,8 @@ The plain text title and description of the issue fill the top center of the iss
The description fully supports [GitLab Flavored Markdown](../../markdown.md#gitlab-flavored-markdown-gfm),
allowing many formatting options.
+> [Since GitLab 12.5](https://gitlab.com/gitlab-org/gitlab/issues/10103), changes to an issue's description are listed in the [issue history](#23-issue-history). **(STARTER)**
+
#### 17. Mentions
You can mention a user or a group present in your GitLab instance with `@username` or
diff --git a/qa/qa/specs/features/browser_ui/7_configure/auto_devops/create_project_with_auto_devops_spec.rb b/qa/qa/specs/features/browser_ui/7_configure/auto_devops/create_project_with_auto_devops_spec.rb
index e9a3b0f75e6..3f99ae644c7 100644
--- a/qa/qa/specs/features/browser_ui/7_configure/auto_devops/create_project_with_auto_devops_spec.rb
+++ b/qa/qa/specs/features/browser_ui/7_configure/auto_devops/create_project_with_auto_devops_spec.rb
@@ -55,8 +55,7 @@ module QA
end
end
- # https://gitlab.com/gitlab-org/gitlab/issues/35156
- describe 'Auto DevOps support', :orchestrated, :kubernetes, :quarantine do
+ describe 'Auto DevOps support', :orchestrated, :kubernetes do
context 'when rbac is enabled' do
before(:all) do
@cluster = Service::KubernetesCluster.new.create!
diff --git a/spec/controllers/projects/notes_controller_spec.rb b/spec/controllers/projects/notes_controller_spec.rb
index e576a3d2d40..abc9e728cb3 100644
--- a/spec/controllers/projects/notes_controller_spec.rb
+++ b/spec/controllers/projects/notes_controller_spec.rb
@@ -259,6 +259,17 @@ describe Projects::NotesController do
end
end
+ context 'the note does not have commands_only errors' do
+ context 'for empty note' do
+ let(:note_text) { '' }
+ let(:extra_request_params) { { format: :json } }
+
+ it "returns status 422 for json" do
+ expect(response).to have_gitlab_http_status(422)
+ end
+ end
+ end
+
context 'the project is a private project' do
let(:project_visibility) { Gitlab::VisibilityLevel::PRIVATE }
diff --git a/spec/factories/clusters/clusters.rb b/spec/factories/clusters/clusters.rb
index 609e7e20187..7121850e5ff 100644
--- a/spec/factories/clusters/clusters.rb
+++ b/spec/factories/clusters/clusters.rb
@@ -90,6 +90,28 @@ FactoryBot.define do
domain { 'example.com' }
end
+ trait :with_environments do
+ transient do
+ environments { %i(staging production) }
+ end
+
+ cluster_type { Clusters::Cluster.cluster_types[:project_type] }
+
+ before(:create) do |cluster, evaluator|
+ cluster_project = create(:cluster_project, cluster: cluster)
+
+ evaluator.environments.each do |env_name|
+ environment = create(:environment, name: env_name, project: cluster_project.project)
+
+ cluster.kubernetes_namespaces << create(:cluster_kubernetes_namespace,
+ cluster: cluster,
+ cluster_project: cluster_project,
+ project: cluster_project.project,
+ environment: environment)
+ end
+ end
+ end
+
trait :not_managed do
managed { false }
end
diff --git a/spec/helpers/projects/error_tracking_helper_spec.rb b/spec/helpers/projects/error_tracking_helper_spec.rb
index 064b3ad21cb..5e7449e21b7 100644
--- a/spec/helpers/projects/error_tracking_helper_spec.rb
+++ b/spec/helpers/projects/error_tracking_helper_spec.rb
@@ -75,4 +75,21 @@ describe Projects::ErrorTrackingHelper do
end
end
end
+
+ describe '#error_details_data' do
+ let(:issue_id) { 1234 }
+ let(:route_params) { [project.owner, project, issue_id, { format: :json }] }
+ let(:details_path) { details_namespace_project_error_tracking_index_path(*route_params) }
+ let(:stack_trace_path) { stack_trace_namespace_project_error_tracking_index_path(*route_params) }
+
+ let(:result) { helper.error_details_data(project, issue_id) }
+
+ it 'returns the correct details path' do
+ expect(result['issue-details-path']).to eq details_path
+ end
+
+ it 'returns the correct stack trace path' do
+ expect(result['issue-stack-trace-path']).to eq stack_trace_path
+ end
+ end
end
diff --git a/spec/models/clusters/cluster_spec.rb b/spec/models/clusters/cluster_spec.rb
index a163229e15a..049db4f7013 100644
--- a/spec/models/clusters/cluster_spec.rb
+++ b/spec/models/clusters/cluster_spec.rb
@@ -500,6 +500,48 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do
end
end
+ describe '.with_persisted_applications' do
+ let(:cluster) { create(:cluster) }
+ let!(:helm) { create(:clusters_applications_helm, :installed, cluster: cluster) }
+
+ it 'preloads persisted applications' do
+ query_rec = ActiveRecord::QueryRecorder.new do
+ described_class.with_persisted_applications.find_by_id(cluster.id).application_helm
+ end
+
+ expect(query_rec.count).to eq(1)
+ end
+ end
+
+ describe '#persisted_applications' do
+ let(:cluster) { create(:cluster) }
+
+ subject { cluster.persisted_applications }
+
+ context 'when all applications are created' do
+ let!(:helm) { create(:clusters_applications_helm, cluster: cluster) }
+ let!(:ingress) { create(:clusters_applications_ingress, cluster: cluster) }
+ let!(:cert_manager) { create(:clusters_applications_cert_manager, cluster: cluster) }
+ let!(:prometheus) { create(:clusters_applications_prometheus, cluster: cluster) }
+ let!(:runner) { create(:clusters_applications_runner, cluster: cluster) }
+ let!(:jupyter) { create(:clusters_applications_jupyter, cluster: cluster) }
+ let!(:knative) { create(:clusters_applications_knative, cluster: cluster) }
+
+ it 'returns a list of created applications' do
+ is_expected.to contain_exactly(helm, ingress, cert_manager, prometheus, runner, jupyter, knative)
+ end
+ end
+
+ context 'when not all were created' do
+ let!(:helm) { create(:clusters_applications_helm, cluster: cluster) }
+ let!(:ingress) { create(:clusters_applications_ingress, cluster: cluster) }
+
+ it 'returns a list of created applications' do
+ is_expected.to contain_exactly(helm, ingress)
+ end
+ end
+ end
+
describe '#applications' do
set(:cluster) { create(:cluster) }
diff --git a/spec/services/clusters/cleanup/app_service_spec.rb b/spec/services/clusters/cleanup/app_service_spec.rb
new file mode 100644
index 00000000000..cc27f409086
--- /dev/null
+++ b/spec/services/clusters/cleanup/app_service_spec.rb
@@ -0,0 +1,117 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Cleanup::AppService do
+ describe '#execute' do
+ let!(:cluster) { create(:cluster, :project, :cleanup_uninstalling_applications, provider_type: :gcp) }
+ let(:service) { described_class.new(cluster) }
+ let(:logger) { service.send(:logger) }
+ let(:log_meta) do
+ {
+ service: described_class.name,
+ cluster_id: cluster.id,
+ execution_count: 0
+ }
+ end
+
+ subject { service.execute }
+
+ shared_examples 'does not reschedule itself' do
+ it 'does not reschedule itself' do
+ expect(Clusters::Cleanup::AppWorker).not_to receive(:perform_in)
+
+ subject
+ end
+ end
+
+ context 'when cluster has no applications available or transitioning applications' do
+ it_behaves_like 'does not reschedule itself'
+
+ it 'transitions cluster to cleanup_removing_project_namespaces' do
+ expect { subject }
+ .to change { cluster.reload.cleanup_status_name }
+ .from(:cleanup_uninstalling_applications)
+ .to(:cleanup_removing_project_namespaces)
+ end
+
+ it 'schedules Clusters::Cleanup::ProjectNamespaceWorker' do
+ expect(Clusters::Cleanup::ProjectNamespaceWorker).to receive(:perform_async).with(cluster.id)
+ subject
+ end
+
+ it 'logs all events' do
+ expect(logger).to receive(:info)
+ .with(log_meta.merge(event: :schedule_remove_project_namespaces))
+
+ subject
+ end
+ end
+
+ context 'when cluster has uninstallable applications' do
+ shared_examples 'reschedules itself' do
+ it 'reschedules itself' do
+ expect(Clusters::Cleanup::AppWorker)
+ .to receive(:perform_in)
+ .with(1.minute, cluster.id, 1)
+
+ subject
+ end
+ end
+
+ context 'has applications with dependencies' do
+ let!(:helm) { create(:clusters_applications_helm, :installed, cluster: cluster) }
+ let!(:ingress) { create(:clusters_applications_ingress, :installed, cluster: cluster) }
+ let!(:cert_manager) { create(:clusters_applications_cert_manager, :installed, cluster: cluster) }
+ let!(:jupyter) { create(:clusters_applications_jupyter, :installed, cluster: cluster) }
+
+ it_behaves_like 'reschedules itself'
+
+ it 'only uninstalls apps that are not dependencies for other installed apps' do
+ expect(Clusters::Applications::UninstallWorker)
+ .not_to receive(:perform_async).with(helm.name, helm.id)
+
+ expect(Clusters::Applications::UninstallWorker)
+ .not_to receive(:perform_async).with(ingress.name, ingress.id)
+
+ expect(Clusters::Applications::UninstallWorker)
+ .to receive(:perform_async).with(cert_manager.name, cert_manager.id)
+ .and_call_original
+
+ expect(Clusters::Applications::UninstallWorker)
+ .to receive(:perform_async).with(jupyter.name, jupyter.id)
+ .and_call_original
+
+ subject
+ end
+
+ it 'logs application uninstalls and next execution' do
+ expect(logger).to receive(:info)
+ .with(log_meta.merge(event: :uninstalling_app, application: kind_of(String))).exactly(2).times
+ expect(logger).to receive(:info)
+ .with(log_meta.merge(event: :scheduling_execution, next_execution: 1))
+
+ subject
+ end
+
+ context 'cluster is not cleanup_uninstalling_applications' do
+ let!(:cluster) { create(:cluster, :project, provider_type: :gcp) }
+
+ it_behaves_like 'does not reschedule itself'
+ end
+ end
+
+ context 'when applications are still uninstalling/scheduled/depending on others' do
+ let!(:helm) { create(:clusters_applications_helm, :installed, cluster: cluster) }
+ let!(:ingress) { create(:clusters_applications_ingress, :scheduled, cluster: cluster) }
+ let!(:runner) { create(:clusters_applications_runner, :uninstalling, cluster: cluster) }
+
+ it_behaves_like 'reschedules itself'
+
+ it 'does not call the uninstallation service' do
+ expect(Clusters::Applications::UninstallWorker).not_to receive(:new)
+
+ subject
+ end
+ end
+ end
+ end
+end
diff --git a/spec/services/clusters/cleanup/project_namespace_service_spec.rb b/spec/services/clusters/cleanup/project_namespace_service_spec.rb
new file mode 100644
index 00000000000..22e29cc57d1
--- /dev/null
+++ b/spec/services/clusters/cleanup/project_namespace_service_spec.rb
@@ -0,0 +1,85 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Cleanup::ProjectNamespaceService do
+ describe '#execute' do
+ subject { service.execute }
+
+ let!(:service) { described_class.new(cluster) }
+ let!(:cluster) { create(:cluster, :with_environments, :cleanup_removing_project_namespaces) }
+ let!(:logger) { service.send(:logger) }
+ let(:log_meta) do
+ {
+ service: described_class.name,
+ cluster_id: cluster.id,
+ execution_count: 0
+ }
+ end
+ let(:kubeclient_instance_double) do
+ instance_double(Gitlab::Kubernetes::KubeClient, delete_namespace: nil, delete_service_account: nil)
+ end
+
+ before do
+ allow_any_instance_of(Clusters::Cluster).to receive(:kubeclient).and_return(kubeclient_instance_double)
+ end
+
+ context 'when cluster has namespaces to be deleted' do
+ it 'deletes namespaces from cluster' do
+ expect(kubeclient_instance_double).to receive(:delete_namespace)
+ .with(cluster.kubernetes_namespaces[0].namespace)
+ expect(kubeclient_instance_double).to receive(:delete_namespace)
+ .with(cluster.kubernetes_namespaces[1].namespace)
+
+ subject
+ end
+
+ it 'deletes namespaces from database' do
+ expect { subject }.to change { cluster.kubernetes_namespaces.exists? }.from(true).to(false)
+ end
+
+ it 'schedules ::ServiceAccountWorker' do
+ expect(Clusters::Cleanup::ServiceAccountWorker).to receive(:perform_async).with(cluster.id)
+ subject
+ end
+
+ it 'logs all events' do
+ expect(logger).to receive(:info)
+ .with(
+ log_meta.merge(
+ event: :deleting_project_namespace,
+ namespace: cluster.kubernetes_namespaces[0].namespace))
+ expect(logger).to receive(:info)
+ .with(
+ log_meta.merge(
+ event: :deleting_project_namespace,
+ namespace: cluster.kubernetes_namespaces[1].namespace))
+
+ subject
+ end
+ end
+
+ context 'when cluster has no namespaces' do
+ let!(:cluster) { create(:cluster, :cleanup_removing_project_namespaces) }
+
+ it 'schedules Clusters::Cleanup::ServiceAccountWorker' do
+ expect(Clusters::Cleanup::ServiceAccountWorker).to receive(:perform_async).with(cluster.id)
+
+ subject
+ end
+
+ it 'transitions to cleanup_removing_service_account' do
+ expect { subject }
+ .to change { cluster.reload.cleanup_status_name }
+ .from(:cleanup_removing_project_namespaces)
+ .to(:cleanup_removing_service_account)
+ end
+
+ it 'does not try to delete namespaces' do
+ expect(kubeclient_instance_double).not_to receive(:delete_namespace)
+
+ subject
+ end
+ end
+ end
+end
diff --git a/spec/services/clusters/cleanup/service_account_service_spec.rb b/spec/services/clusters/cleanup/service_account_service_spec.rb
new file mode 100644
index 00000000000..ecaf0da9fa3
--- /dev/null
+++ b/spec/services/clusters/cleanup/service_account_service_spec.rb
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Cleanup::ServiceAccountService do
+ describe '#execute' do
+ subject { service.execute }
+
+ let!(:service) { described_class.new(cluster) }
+ let!(:cluster) { create(:cluster, :cleanup_removing_service_account) }
+ let!(:logger) { service.send(:logger) }
+ let(:log_meta) do
+ {
+ service: described_class.name,
+ cluster_id: cluster.id,
+ execution_count: 0
+ }
+ end
+ let(:kubeclient_instance_double) do
+ instance_double(Gitlab::Kubernetes::KubeClient, delete_namespace: nil, delete_service_account: nil)
+ end
+
+ before do
+ allow_any_instance_of(Clusters::Cluster).to receive(:kubeclient).and_return(kubeclient_instance_double)
+ end
+
+ it 'deletes gitlab service account' do
+ expect(kubeclient_instance_double).to receive(:delete_service_account)
+ .with(
+ ::Clusters::Kubernetes::GITLAB_SERVICE_ACCOUNT_NAME,
+ ::Clusters::Kubernetes::GITLAB_SERVICE_ACCOUNT_NAMESPACE)
+
+ subject
+ end
+
+ it 'logs all events' do
+ expect(logger).to receive(:info).with(log_meta.merge(event: :deleting_gitlab_service_account))
+ expect(logger).to receive(:info).with(log_meta.merge(event: :destroying_cluster))
+
+ subject
+ end
+
+ it 'deletes cluster' do
+ expect { subject }.to change { Clusters::Cluster.where(id: cluster.id).exists? }.from(true).to(false)
+ end
+ end
+end
diff --git a/spec/services/clusters/destroy_service_spec.rb b/spec/services/clusters/destroy_service_spec.rb
index c0fcc971500..43ebf8f499e 100644
--- a/spec/services/clusters/destroy_service_spec.rb
+++ b/spec/services/clusters/destroy_service_spec.rb
@@ -45,7 +45,7 @@ describe Clusters::DestroyService do
expect(Clusters::Cluster.where(id: cluster.id).exists?).not_to be_falsey
end
- it 'transition cluster#cleanup_status from cleanup_not_started to uninstalling_applications' do
+ it 'transitions cluster#cleanup_status from cleanup_not_started to cleanup_uninstalling_applications' do
expect { subject }.to change { cluster.cleanup_status_name }
.from(:cleanup_not_started)
.to(:cleanup_uninstalling_applications)
diff --git a/spec/support/shared_examples/models/cluster_cleanup_worker_base_shared_examples.rb b/spec/support/shared_examples/models/cluster_cleanup_worker_base_shared_examples.rb
new file mode 100644
index 00000000000..66bbd908ea8
--- /dev/null
+++ b/spec/support/shared_examples/models/cluster_cleanup_worker_base_shared_examples.rb
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+shared_examples 'cluster cleanup worker base specs' do
+ it 'transitions to errored if sidekiq retries exhausted' do
+ job = { 'args' => [cluster.id, 0], 'jid' => '123' }
+
+ described_class.sidekiq_retries_exhausted_block.call(job)
+
+ expect(cluster.reload.cleanup_status_name).to eq(:cleanup_errored)
+ end
+end
diff --git a/spec/workers/clusters/cleanup/app_worker_spec.rb b/spec/workers/clusters/cleanup/app_worker_spec.rb
new file mode 100644
index 00000000000..29c00db8079
--- /dev/null
+++ b/spec/workers/clusters/cleanup/app_worker_spec.rb
@@ -0,0 +1,41 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Cleanup::AppWorker do
+ describe '#perform' do
+ subject { worker_instance.perform(cluster.id) }
+
+ let!(:worker_instance) { described_class.new }
+ let!(:cluster) { create(:cluster, :project, :cleanup_uninstalling_applications, provider_type: :gcp) }
+ let!(:logger) { worker_instance.send(:logger) }
+
+ it_behaves_like 'cluster cleanup worker base specs'
+
+ context 'when exceeded the execution limit' do
+ subject { worker_instance.perform(cluster.id, worker_instance.send(:execution_limit)) }
+
+ let(:worker_instance) { described_class.new }
+ let(:logger) { worker_instance.send(:logger) }
+ let!(:helm) { create(:clusters_applications_helm, :installed, cluster: cluster) }
+ let!(:ingress) { create(:clusters_applications_ingress, :scheduled, cluster: cluster) }
+
+ it 'logs the error' do
+ expect(logger).to receive(:error)
+ .with(
+ hash_including(
+ exception: 'ClusterCleanupMethods::ExceededExecutionLimitError',
+ cluster_id: kind_of(Integer),
+ class_name: described_class.name,
+ applications: "helm:installed,ingress:scheduled",
+ cleanup_status: cluster.cleanup_status_name,
+ event: :failed_to_remove_cluster_and_resources,
+ message: "exceeded execution limit of 10 tries"
+ )
+ )
+
+ subject
+ end
+ end
+ end
+end
diff --git a/spec/workers/clusters/cleanup/project_namespace_worker_spec.rb b/spec/workers/clusters/cleanup/project_namespace_worker_spec.rb
new file mode 100644
index 00000000000..8b6f22e9a61
--- /dev/null
+++ b/spec/workers/clusters/cleanup/project_namespace_worker_spec.rb
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Cleanup::ProjectNamespaceWorker do
+ describe '#perform' do
+ context 'when cluster.cleanup_status is cleanup_removing_project_namespaces' do
+ let!(:cluster) { create(:cluster, :with_environments, :cleanup_removing_project_namespaces) }
+ let!(:worker_instance) { described_class.new }
+ let!(:logger) { worker_instance.send(:logger) }
+
+ it_behaves_like 'cluster cleanup worker base specs'
+
+ it 'calls Clusters::Cleanup::ProjectNamespaceService' do
+ expect_any_instance_of(Clusters::Cleanup::ProjectNamespaceService).to receive(:execute).once
+
+ subject.perform(cluster.id)
+ end
+
+ context 'when exceeded the execution limit' do
+ subject { worker_instance.perform(cluster.id, worker_instance.send(:execution_limit)) }
+
+ it 'logs the error' do
+ expect(logger).to receive(:error)
+ .with(
+ hash_including(
+ exception: 'ClusterCleanupMethods::ExceededExecutionLimitError',
+ cluster_id: kind_of(Integer),
+ class_name: described_class.name,
+ applications: "",
+ cleanup_status: cluster.cleanup_status_name,
+ event: :failed_to_remove_cluster_and_resources,
+ message: "exceeded execution limit of 10 tries"
+ )
+ )
+
+ subject
+ end
+ end
+ end
+
+ context 'when cluster.cleanup_status is not cleanup_removing_project_namespaces' do
+ let!(:cluster) { create(:cluster, :with_environments) }
+
+ it 'does not call Clusters::Cleanup::ProjectNamespaceService' do
+ expect(Clusters::Cleanup::ProjectNamespaceService).not_to receive(:new)
+
+ subject.perform(cluster.id)
+ end
+ end
+ end
+end
diff --git a/spec/workers/clusters/cleanup/service_account_worker_spec.rb b/spec/workers/clusters/cleanup/service_account_worker_spec.rb
new file mode 100644
index 00000000000..9af53dd63c1
--- /dev/null
+++ b/spec/workers/clusters/cleanup/service_account_worker_spec.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Cleanup::ServiceAccountWorker do
+ describe '#perform' do
+ let!(:cluster) { create(:cluster, :cleanup_removing_service_account) }
+
+ context 'when cluster.cleanup_status is cleanup_removing_service_account' do
+ it 'calls Clusters::Cleanup::ServiceAccountService' do
+ expect_any_instance_of(Clusters::Cleanup::ServiceAccountService).to receive(:execute).once
+
+ subject.perform(cluster.id)
+ end
+ end
+
+ context 'when cluster.cleanup_status is not cleanup_removing_service_account' do
+ let!(:cluster) { create(:cluster, :with_environments) }
+
+ it 'does not call Clusters::Cleanup::ServiceAccountService' do
+ expect(Clusters::Cleanup::ServiceAccountService).not_to receive(:new)
+
+ subject.perform(cluster.id)
+ end
+ end
+ end
+end