author     GitLab Bot <gitlab-bot@gitlab.com>  2020-03-24 09:09:25 +0000
committer  GitLab Bot <gitlab-bot@gitlab.com>  2020-03-24 09:09:25 +0000
commit     6f7881ee9dcec34141a8f34fc814b56b366d2b48 (patch)
tree       25f72a06874b32b1049b79a9d7f4f1b7bca43b9b
parent     8c8bf44fa64f98114f7439f751c92d59a44b3218 (diff)
download   gitlab-ce-6f7881ee9dcec34141a8f34fc814b56b366d2b48.tar.gz
Add latest changes from gitlab-org/gitlab@master
-rw-r--r--  .rubocop.yml | 15
-rw-r--r--  GITALY_SERVER_VERSION | 2
-rw-r--r--  app/assets/stylesheets/notify.scss | 6
-rw-r--r--  app/models/clusters/applications/ingress.rb | 6
-rw-r--r--  app/models/clusters/applications/prometheus.rb | 12
-rw-r--r--  app/models/project.rb | 6
-rw-r--r--  app/models/user.rb | 1
-rw-r--r--  app/models/user_canonical_email.rb | 8
-rw-r--r--  app/services/clusters/applications/check_upgrade_progress_service.rb | 71
-rw-r--r--  app/services/clusters/applications/prometheus_config_service.rb | 155
-rw-r--r--  app/services/clusters/applications/prometheus_update_service.rb | 35
-rw-r--r--  app/services/clusters/applications/schedule_update_service.rb | 38
-rw-r--r--  app/services/users/build_service.rb | 2
-rw-r--r--  app/services/users/update_canonical_email_service.rb | 56
-rw-r--r--  app/services/users/update_service.rb | 7
-rw-r--r--  app/workers/all_queues.yml | 14
-rw-r--r--  app/workers/cluster_update_app_worker.rb | 50
-rw-r--r--  app/workers/cluster_wait_for_app_update_worker.rb | 16
-rw-r--r--  changelogs/unreleased/fix_logic_for_ingress_can_uninstall.yml | 5
-rw-r--r--  changelogs/unreleased/pokstad1-praefect-docs-reconcile-subcmd.yml | 5
-rw-r--r--  db/migrate/20200214025454_add_canonical_emails.rb | 26
-rw-r--r--  db/structure.sql | 32
-rw-r--r--  doc/administration/gitaly/praefect.md | 23
-rw-r--r--  doc/user/project/merge_requests/test_coverage_visualization.md | 2
-rw-r--r--  spec/factories/user_canonical_emails.rb | 8
-rw-r--r--  spec/frontend/notes/components/notes_app_spec.js (renamed from spec/frontend/notes/components/note_app_spec.js) | 0
-rw-r--r--  spec/frontend/u2f/authenticate_spec.js (renamed from spec/javascripts/u2f/authenticate_spec.js) | 59
-rw-r--r--  spec/frontend/u2f/mock_u2f_device.js (renamed from spec/javascripts/u2f/mock_u2f_device.js) | 0
-rw-r--r--  spec/frontend/u2f/register_spec.js (renamed from spec/javascripts/u2f/register_spec.js) | 48
-rw-r--r--  spec/models/clusters/applications/ingress_spec.rb | 58
-rw-r--r--  spec/models/clusters/applications/prometheus_spec.rb | 45
-rw-r--r--  spec/models/project_spec.rb | 18
-rw-r--r--  spec/models/user_canonical_email_spec.rb | 20
-rw-r--r--  spec/services/clusters/applications/check_upgrade_progress_service_spec.rb | 94
-rw-r--r--  spec/services/clusters/applications/prometheus_config_service_spec.rb | 158
-rw-r--r--  spec/services/clusters/applications/prometheus_update_service_spec.rb | 92
-rw-r--r--  spec/services/clusters/applications/schedule_update_service_spec.rb | 37
-rw-r--r--  spec/services/users/build_service_spec.rb | 8
-rw-r--r--  spec/services/users/create_service_spec.rb | 3
-rw-r--r--  spec/services/users/update_canonical_email_service_spec.rb | 116
-rw-r--r--  spec/services/users/update_service_spec.rb | 26
-rw-r--r--  spec/workers/cluster_update_app_worker_spec.rb | 98
-rw-r--r--  spec/workers/cluster_wait_for_app_update_worker_spec.rb | 27
43 files changed, 1438 insertions(+), 70 deletions(-)
diff --git a/.rubocop.yml b/.rubocop.yml
index 9251c961289..b379f0ee722 100644
--- a/.rubocop.yml
+++ b/.rubocop.yml
@@ -331,8 +331,21 @@ RSpec/LeakyConstantDeclaration:
Enabled: true
Exclude:
- 'spec/**/*.rb'
- - 'ee/spec/**/*.rb'
- 'qa/spec/**/*.rb'
+ - 'ee/spec/lib/gitlab/geo/log_cursor/logger_spec.rb'
+ - 'ee/spec/lib/gitlab/geo/log_helpers_spec.rb'
+ - 'ee/spec/lib/gitlab/geo/replicator_spec.rb'
+ - 'ee/spec/mailers/emails/service_desk_spec.rb'
+ - 'ee/spec/migrations/remove_creations_in_gitlab_subscription_histories_spec.rb'
+ - 'ee/spec/migrations/set_resolved_state_on_vulnerabilities_spec.rb'
+ - 'ee/spec/models/repository_spec.rb'
+ - 'ee/spec/presenters/security/vulnerable_project_presenter_spec.rb'
+ - 'ee/spec/serializers/vulnerable_project_entity_spec.rb'
+ - 'ee/spec/services/clusters/applications/check_upgrade_progress_service_spec.rb'
+ - 'ee/spec/services/dashboard/projects/list_service_spec.rb'
+ - 'ee/spec/services/metrics/dashboard/clone_dashboard_service_spec.rb'
+ - 'ee/spec/support/shared_contexts/epic_aggregate_constants.rb'
+ - 'ee/spec/workers/elastic_namespace_rollout_worker_spec.rb'
RSpec/EmptyLineAfterHook:
Enabled: false
diff --git a/GITALY_SERVER_VERSION b/GITALY_SERVER_VERSION
index c75f0df204f..e1e97da1292 100644
--- a/GITALY_SERVER_VERSION
+++ b/GITALY_SERVER_VERSION
@@ -1 +1 @@
-12.9.0-rc1
+12.9.0-rc5
diff --git a/app/assets/stylesheets/notify.scss b/app/assets/stylesheets/notify.scss
index ea82ba3e879..b59b01f4086 100644
--- a/app/assets/stylesheets/notify.scss
+++ b/app/assets/stylesheets/notify.scss
@@ -20,7 +20,11 @@ pre.commit-message {
}
.gl-label-scoped {
- box-shadow: 0 0 0 2px currentColor inset;
+ border: 2px solid currentColor;
+ box-sizing: border-box;
+ display: inline-block;
+ height: 17px;
+ line-height: 14px;
}
.gl-label-text {
diff --git a/app/models/clusters/applications/ingress.rb b/app/models/clusters/applications/ingress.rb
index 78c2a74da33..baf34e916f8 100644
--- a/app/models/clusters/applications/ingress.rb
+++ b/app/models/clusters/applications/ingress.rb
@@ -50,7 +50,7 @@ module Clusters
end
def allowed_to_uninstall?
- external_ip_or_hostname? && application_jupyter_nil_or_installable?
+ external_ip_or_hostname? && !application_jupyter_installed?
end
def install_command
@@ -161,8 +161,8 @@ module Clusters
YAML.load_file(chart_values_file).deep_merge!(specification)
end
- def application_jupyter_nil_or_installable?
- cluster.application_jupyter.nil? || cluster.application_jupyter&.installable?
+ def application_jupyter_installed?
+ cluster.application_jupyter&.installed?
end
def modsecurity_snippet_content
diff --git a/app/models/clusters/applications/prometheus.rb b/app/models/clusters/applications/prometheus.rb
index 8297f653ea7..3183318690c 100644
--- a/app/models/clusters/applications/prometheus.rb
+++ b/app/models/clusters/applications/prometheus.rb
@@ -35,6 +35,16 @@ module Clusters
.perform_async(application.cluster_id, ::PrometheusService.to_param) # rubocop:disable CodeReuse/ServiceClass
end
end
+
+ after_transition any => :updating do |application|
+ application.update(last_update_started_at: Time.now)
+ end
+ end
+
+ def updated_since?(timestamp)
+ last_update_started_at &&
+ last_update_started_at > timestamp &&
+ !update_errored?
end
def chart
@@ -148,5 +158,3 @@ module Clusters
end
end
end
-
-Clusters::Applications::Prometheus.prepend_if_ee('EE::Clusters::Applications::Prometheus')
diff --git a/app/models/project.rb b/app/models/project.rb
index 34c9c7320be..b9d8fd1e4d8 100644
--- a/app/models/project.rb
+++ b/app/models/project.rb
@@ -2411,6 +2411,12 @@ class Project < ApplicationRecord
branch_protection.fully_protected? || branch_protection.developer_can_merge?
end
+ def environments_for_scope(scope)
+ quoted_scope = ::Gitlab::SQL::Glob.q(scope)
+
+ environments.where("name LIKE (#{::Gitlab::SQL::Glob.to_like(quoted_scope)})") # rubocop:disable GitlabSecurity/SqlInjection
+ end
+
private
def closest_namespace_setting(name)
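The new `Project#environments_for_scope` delegates wildcard handling to `Gitlab::SQL::Glob`, so a cluster's environment scope such as `*` or `review/*` is turned into a SQL `LIKE` pattern. A rough standalone sketch of that translation (the `glob_to_like` helper below is hypothetical, not the real `Gitlab::SQL::Glob` API):

```ruby
# Hypothetical stand-in for the glob-to-LIKE translation used above:
# escape LIKE metacharacters, then map the '*' wildcard to '%'.
def glob_to_like(scope)
  scope.gsub(/[%_]/) { |m| "\\#{m}" }.tr('*', '%')
end

glob_to_like('*')         # => "%"         (matches every environment name)
glob_to_like('review/*')  # => "review/%"  (review/foo, review/bar, ...)
glob_to_like('staging')   # => "staging"   (only the literal name)
```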
diff --git a/app/models/user.rb b/app/models/user.rb
index 7789326e8fa..4f484657f13 100644
--- a/app/models/user.rb
+++ b/app/models/user.rb
@@ -168,6 +168,7 @@ class User < ApplicationRecord
has_one :user_preference
has_one :user_detail
has_one :user_highest_role
+ has_one :user_canonical_email
#
# Validations
diff --git a/app/models/user_canonical_email.rb b/app/models/user_canonical_email.rb
new file mode 100644
index 00000000000..044e4fd775e
--- /dev/null
+++ b/app/models/user_canonical_email.rb
@@ -0,0 +1,8 @@
+# frozen_string_literal: true
+
+class UserCanonicalEmail < ApplicationRecord
+ validates :canonical_email, presence: true
+ validates :canonical_email, format: { with: Devise.email_regexp }
+
+ belongs_to :user, inverse_of: :user_canonical_email
+end
diff --git a/app/services/clusters/applications/check_upgrade_progress_service.rb b/app/services/clusters/applications/check_upgrade_progress_service.rb
new file mode 100644
index 00000000000..8502ea69f27
--- /dev/null
+++ b/app/services/clusters/applications/check_upgrade_progress_service.rb
@@ -0,0 +1,71 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Applications
+ class CheckUpgradeProgressService < BaseHelmService
+ def execute
+ return unless app.updating?
+
+ case phase
+ when ::Gitlab::Kubernetes::Pod::SUCCEEDED
+ on_success
+ when ::Gitlab::Kubernetes::Pod::FAILED
+ on_failed
+ else
+ check_timeout
+ end
+ rescue ::Kubeclient::HttpError => e
+ app.make_update_errored!("Kubernetes error: #{e.message}") unless app.update_errored?
+ end
+
+ private
+
+ def on_success
+ app.make_installed!
+ ensure
+ remove_pod
+ end
+
+ def on_failed
+ app.make_update_errored!(errors || 'Update silently failed')
+ ensure
+ remove_pod
+ end
+
+ def check_timeout
+ if timed_out?
+ begin
+ app.make_update_errored!('Update timed out')
+ ensure
+ remove_pod
+ end
+ else
+ ::ClusterWaitForAppUpdateWorker.perform_in(
+ ::ClusterWaitForAppUpdateWorker::INTERVAL, app.name, app.id)
+ end
+ end
+
+ def timed_out?
+ Time.now.utc - app.updated_at.to_time.utc > ::ClusterWaitForAppUpdateWorker::TIMEOUT
+ end
+
+ def remove_pod
+ helm_api.delete_pod!(pod_name)
+ rescue
+ # no-op
+ end
+
+ def phase
+ helm_api.status(pod_name)
+ end
+
+ def errors
+ helm_api.log(pod_name)
+ end
+
+ def pod_name
+ @pod_name ||= patch_command.pod_name
+ end
+ end
+ end
+end
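`CheckUpgradeProgressService` is re-invoked by `ClusterWaitForAppUpdateWorker` (added below) until the Helm patch pod reaches a terminal phase or the timeout elapses. A rough, synchronous sketch of that polling behaviour, assuming a caller-supplied `phase_of` lambda in place of `helm_api.status(pod_name)` and plain strings for the pod phase constants:

```ruby
INTERVAL = 10        # seconds, mirrors ClusterWaitForAppUpdateWorker::INTERVAL
TIMEOUT  = 20 * 60   # seconds, mirrors ClusterWaitForAppUpdateWorker::TIMEOUT

# Illustration only; the real flow re-enqueues a Sidekiq job rather than
# sleeping in a loop, and measures the timeout from the record's updated_at.
def wait_for_upgrade(app, phase_of:)
  started = Time.now
  loop do
    case phase_of.call(app)
    when 'Succeeded' then return :installed
    when 'Failed'    then return :update_errored       # status_reason from pod logs
    else
      return :update_errored if Time.now - started > TIMEOUT # "Update timed out"
      sleep INTERVAL
    end
  end
end
```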
diff --git a/app/services/clusters/applications/prometheus_config_service.rb b/app/services/clusters/applications/prometheus_config_service.rb
new file mode 100644
index 00000000000..34d44ab881e
--- /dev/null
+++ b/app/services/clusters/applications/prometheus_config_service.rb
@@ -0,0 +1,155 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Applications
+ class PrometheusConfigService
+ def initialize(project, cluster, app)
+ @project = project
+ @cluster = cluster
+ @app = app
+ end
+
+ def execute(config = {})
+ if has_alerts?
+ generate_alert_manager(config)
+ else
+ reset_alert_manager(config)
+ end
+ end
+
+ private
+
+ attr_reader :project, :cluster, :app
+
+ def reset_alert_manager(config)
+ config = set_alert_manager_enabled(config, false)
+ config.delete('alertmanagerFiles')
+ config['serverFiles'] ||= {}
+ config['serverFiles']['alerts'] = {}
+
+ config
+ end
+
+ def generate_alert_manager(config)
+ config = set_alert_manager_enabled(config, true)
+ config = set_alert_manager_files(config)
+
+ set_alert_manager_groups(config)
+ end
+
+ def set_alert_manager_enabled(config, enabled)
+ config['alertmanager'] ||= {}
+ config['alertmanager']['enabled'] = enabled
+
+ config
+ end
+
+ def set_alert_manager_files(config)
+ config['alertmanagerFiles'] = {
+ 'alertmanager.yml' => {
+ 'receivers' => alert_manager_receivers_params,
+ 'route' => alert_manager_route_params
+ }
+ }
+
+ config
+ end
+
+ def set_alert_manager_groups(config)
+ config['serverFiles'] ||= {}
+ config['serverFiles']['alerts'] ||= {}
+ config['serverFiles']['alerts']['groups'] ||= []
+
+ environments_with_alerts.each do |env_name, alerts|
+ index = config['serverFiles']['alerts']['groups'].find_index do |group|
+ group['name'] == env_name
+ end
+
+ if index
+ config['serverFiles']['alerts']['groups'][index]['rules'] = alerts
+ else
+ config['serverFiles']['alerts']['groups'] << {
+ 'name' => env_name,
+ 'rules' => alerts
+ }
+ end
+ end
+
+ config
+ end
+
+ def alert_manager_receivers_params
+ [
+ {
+ 'name' => 'gitlab',
+ 'webhook_configs' => [
+ {
+ 'url' => notify_url,
+ 'send_resolved' => true,
+ 'http_config' => {
+ 'bearer_token' => alert_manager_token
+ }
+ }
+ ]
+ }
+ ]
+ end
+
+ def alert_manager_token
+ app.generate_alert_manager_token!
+
+ app.alert_manager_token
+ end
+
+ def alert_manager_route_params
+ {
+ 'receiver' => 'gitlab',
+ 'group_wait' => '30s',
+ 'group_interval' => '5m',
+ 'repeat_interval' => '4h'
+ }
+ end
+
+ def notify_url
+ ::Gitlab::Routing.url_helpers
+ .notify_project_prometheus_alerts_url(project, format: :json)
+ end
+
+ def has_alerts?
+ environments_with_alerts.values.flatten(1).any?
+ end
+
+ def environments_with_alerts
+ @environments_with_alerts ||=
+ environments.each_with_object({}) do |environment, hash|
+ name = rule_name(environment)
+ hash[name] = alerts(environment)
+ end
+ end
+
+ def rule_name(environment)
+ "#{environment.name}.rules"
+ end
+
+ def alerts(environment)
+ variables = Gitlab::Prometheus::QueryVariables.call(environment)
+ alerts = Projects::Prometheus::AlertsFinder
+ .new(environment: environment)
+ .execute
+
+ alerts.map do |alert|
+ substitute_query_variables(alert.to_param, variables)
+ end
+ end
+
+ def substitute_query_variables(hash, variables)
+ hash['expr'] %= variables
+ hash
+ end
+
+ def environments
+ project.environments_for_scope(cluster.environment_scope)
+ end
+ end
+ end
+end
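For a project with one alert defined for a `production` environment, the hash returned by `PrometheusConfigService#execute` ends up with roughly the shape sketched below (URL and token values are placeholders; the exact structure is asserted in the spec added later in this commit):

```ruby
# Abbreviated sketch of the Helm values produced when alerts exist.
{
  'alertmanager' => { 'enabled' => true },
  'alertmanagerFiles' => {
    'alertmanager.yml' => {
      'receivers' => [{
        'name' => 'gitlab',
        'webhook_configs' => [{
          'url' => 'https://gitlab.example.com/.../prometheus/alerts/notify.json', # placeholder
          'send_resolved' => true,
          'http_config' => { 'bearer_token' => '<alert manager token>' }
        }]
      }],
      'route' => {
        'receiver' => 'gitlab',
        'group_wait' => '30s',
        'group_interval' => '5m',
        'repeat_interval' => '4h'
      }
    }
  },
  'serverFiles' => {
    'alerts' => {
      'groups' => [{ 'name' => 'production.rules', 'rules' => ['<alert rules for production>'] }]
    }
  }
}
```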
diff --git a/app/services/clusters/applications/prometheus_update_service.rb b/app/services/clusters/applications/prometheus_update_service.rb
new file mode 100644
index 00000000000..437f6ab1202
--- /dev/null
+++ b/app/services/clusters/applications/prometheus_update_service.rb
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Applications
+ class PrometheusUpdateService < BaseHelmService
+ attr_accessor :project
+
+ def initialize(app, project)
+ super(app)
+ @project = project
+ end
+
+ def execute
+ app.make_updating!
+
+ helm_api.update(patch_command(values))
+
+ ::ClusterWaitForAppUpdateWorker.perform_in(::ClusterWaitForAppUpdateWorker::INTERVAL, app.name, app.id)
+ rescue ::Kubeclient::HttpError => ke
+ app.make_update_errored!("Kubernetes error: #{ke.message}")
+ rescue StandardError => e
+ app.make_update_errored!(e.message)
+ end
+
+ private
+
+ def values
+ PrometheusConfigService
+ .new(project, cluster, app)
+ .execute
+ .to_yaml
+ end
+ end
+ end
+end
diff --git a/app/services/clusters/applications/schedule_update_service.rb b/app/services/clusters/applications/schedule_update_service.rb
new file mode 100644
index 00000000000..b7639c771a8
--- /dev/null
+++ b/app/services/clusters/applications/schedule_update_service.rb
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Applications
+ class ScheduleUpdateService
+ BACKOFF_DELAY = 2.minutes
+
+ attr_accessor :application, :project
+
+ def initialize(application, project)
+ @application = application
+ @project = project
+ end
+
+ def execute
+ return unless application
+
+ if recently_scheduled?
+ worker_class.perform_in(BACKOFF_DELAY, application.name, application.id, project.id, Time.now)
+ else
+ worker_class.perform_async(application.name, application.id, project.id, Time.now)
+ end
+ end
+
+ private
+
+ def worker_class
+ ::ClusterUpdateAppWorker
+ end
+
+ def recently_scheduled?
+ return false unless application.last_update_started_at
+
+ application.last_update_started_at.utc >= Time.now.utc - BACKOFF_DELAY
+ end
+ end
+ end
+end
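`ScheduleUpdateService` only debounces: when the last update started within `BACKOFF_DELAY`, the job is pushed `BACKOFF_DELAY` into the future, otherwise it runs immediately. A minimal illustration of that decision using plain timestamps instead of the application record:

```ruby
BACKOFF_DELAY = 2 * 60 # seconds, mirrors ScheduleUpdateService::BACKOFF_DELAY

# Returns :perform_in (delayed) or :perform_async (immediate),
# mirroring ScheduleUpdateService#execute and #recently_scheduled?.
def schedule_mode(last_update_started_at, now: Time.now)
  return :perform_async if last_update_started_at.nil?

  last_update_started_at.utc >= now.utc - BACKOFF_DELAY ? :perform_in : :perform_async
end

schedule_mode(nil)                # => :perform_async
schedule_mode(Time.now - 30)      # => :perform_in    (started 30s ago)
schedule_mode(Time.now - 10 * 60) # => :perform_async (started 10min ago)
```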
diff --git a/app/services/users/build_service.rb b/app/services/users/build_service.rb
index 4c3ae2d204d..6f9f307c322 100644
--- a/app/services/users/build_service.rb
+++ b/app/services/users/build_service.rb
@@ -30,6 +30,8 @@ module Users
build_identity(user)
+ Users::UpdateCanonicalEmailService.new(user: user).execute
+
user
end
diff --git a/app/services/users/update_canonical_email_service.rb b/app/services/users/update_canonical_email_service.rb
new file mode 100644
index 00000000000..1400fd58eb4
--- /dev/null
+++ b/app/services/users/update_canonical_email_service.rb
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+module Users
+ class UpdateCanonicalEmailService
+ extend ActiveSupport::Concern
+
+ INCLUDED_DOMAINS_PATTERN = [/gmail.com/].freeze
+
+ def initialize(user:)
+ raise ArgumentError.new("Please provide a user") unless user&.is_a?(User)
+
+ @user = user
+ end
+
+ def execute
+ return unless user.email
+ return unless user.email.match? Devise.email_regexp
+
+ canonical_email = canonicalize_email
+
+ unless canonical_email
+ # the canonical email doesn't exist, probably because the domain doesn't match
+ # destroy any UserCanonicalEmail record associated with this user
+ user.user_canonical_email&.delete
+ # nothing else to do here
+ return
+ end
+
+ if user.user_canonical_email
+ # update to the new value
+ user.user_canonical_email.canonical_email = canonical_email
+ else
+ user.build_user_canonical_email(canonical_email: canonical_email)
+ end
+ end
+
+ private
+
+ attr_reader :user
+
+ def canonicalize_email
+ email = user.email
+
+ portions = email.split('@')
+ username = portions.shift
+ rest = portions.join
+
+ regex = Regexp.union(INCLUDED_DOMAINS_PATTERN)
+ return unless regex.match?(rest)
+
+ no_dots = username.tr('.', '')
+ before_plus = no_dots.split('+')[0]
+ "#{before_plus}@#{rest}"
+ end
+ end
+end
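The canonicalization above only applies to domains matching `INCLUDED_DOMAINS_PATTERN` (currently Gmail): dots in the local part are stripped and anything after a `+` is dropped, so plus-addressed and dotted aliases collapse to a single canonical address. A simplified standalone sketch of the same transformation:

```ruby
INCLUDED_DOMAINS_PATTERN = [/gmail.com/].freeze

# Simplified mirror of UpdateCanonicalEmailService#canonicalize_email;
# returns nil for addresses whose domain is not in the included list.
def canonicalize(email)
  username, rest = email.split('@', 2)
  return unless Regexp.union(INCLUDED_DOMAINS_PATTERN).match?(rest.to_s)

  "#{username.tr('.', '').split('+').first}@#{rest}"
end

canonicalize('j.doe+spam@gmail.com') # => "jdoe@gmail.com"
canonicalize('j.doe@example.com')    # => nil (domain not included)
```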
diff --git a/app/services/users/update_service.rb b/app/services/users/update_service.rb
index 57209043e3b..f0e9f2b7656 100644
--- a/app/services/users/update_service.rb
+++ b/app/services/users/update_service.rb
@@ -21,6 +21,7 @@ module Users
discard_read_only_attributes
assign_attributes
assign_identity
+ build_canonical_email
if @user.save(validate: validate) && update_status
notify_success(user_exists)
@@ -40,6 +41,12 @@ module Users
private
+ def build_canonical_email
+ return unless @user.email_changed?
+
+ Users::UpdateCanonicalEmailService.new(user: @user).execute
+ end
+
def update_status
return true unless @status_params
diff --git a/app/workers/all_queues.yml b/app/workers/all_queues.yml
index f4d8483db84..cae4bb73e04 100644
--- a/app/workers/all_queues.yml
+++ b/app/workers/all_queues.yml
@@ -325,6 +325,13 @@
:resource_boundary: :unknown
:weight: 1
:idempotent:
+- :name: gcp_cluster:cluster_update_app
+ :feature_category: :kubernetes_management
+ :has_external_dependencies:
+ :urgency: :low
+ :resource_boundary: :unknown
+ :weight: 1
+ :idempotent:
- :name: gcp_cluster:cluster_upgrade_app
:feature_category: :kubernetes_management
:has_external_dependencies: true
@@ -339,6 +346,13 @@
:resource_boundary: :cpu
:weight: 1
:idempotent:
+- :name: gcp_cluster:cluster_wait_for_app_update
+ :feature_category: :kubernetes_management
+ :has_external_dependencies:
+ :urgency: :low
+ :resource_boundary: :unknown
+ :weight: 1
+ :idempotent:
- :name: gcp_cluster:cluster_wait_for_ingress_ip_address
:feature_category: :kubernetes_management
:has_external_dependencies: true
diff --git a/app/workers/cluster_update_app_worker.rb b/app/workers/cluster_update_app_worker.rb
new file mode 100644
index 00000000000..7ceeb167b33
--- /dev/null
+++ b/app/workers/cluster_update_app_worker.rb
@@ -0,0 +1,50 @@
+# frozen_string_literal: true
+
+class ClusterUpdateAppWorker # rubocop:disable Scalability/IdempotentWorker
+ UpdateAlreadyInProgressError = Class.new(StandardError)
+
+ include ApplicationWorker
+ include ClusterQueue
+ include ClusterApplications
+ include ExclusiveLeaseGuard
+
+ sidekiq_options retry: 3, dead: false
+
+ LEASE_TIMEOUT = 10.minutes.to_i
+
+ def perform(app_name, app_id, project_id, scheduled_time)
+ @app_id = app_id
+
+ try_obtain_lease do
+ execute(app_name, app_id, project_id, scheduled_time)
+ end
+ end
+
+ private
+
+ # rubocop: disable CodeReuse/ActiveRecord
+ def execute(app_name, app_id, project_id, scheduled_time)
+ project = Project.find_by(id: project_id)
+ return unless project
+
+ find_application(app_name, app_id) do |app|
+ update_prometheus(app, scheduled_time, project)
+ end
+ end
+ # rubocop: enable CodeReuse/ActiveRecord
+
+ def update_prometheus(app, scheduled_time, project)
+ return if app.updated_since?(scheduled_time)
+ return if app.update_in_progress?
+
+ Clusters::Applications::PrometheusUpdateService.new(app, project).execute
+ end
+
+ def lease_key
+ @lease_key ||= "#{self.class.name.underscore}-#{@app_id}"
+ end
+
+ def lease_timeout
+ LEASE_TIMEOUT
+ end
+end
diff --git a/app/workers/cluster_wait_for_app_update_worker.rb b/app/workers/cluster_wait_for_app_update_worker.rb
new file mode 100644
index 00000000000..9f1d83c2c7b
--- /dev/null
+++ b/app/workers/cluster_wait_for_app_update_worker.rb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+class ClusterWaitForAppUpdateWorker # rubocop:disable Scalability/IdempotentWorker
+ include ApplicationWorker
+ include ClusterQueue
+ include ClusterApplications
+
+ INTERVAL = 10.seconds
+ TIMEOUT = 20.minutes
+
+ def perform(app_name, app_id)
+ find_application(app_name, app_id) do |app|
+ ::Clusters::Applications::CheckUpgradeProgressService.new(app).execute
+ end
+ end
+end
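Taken together, the workers and services added in this commit form a single update pipeline; roughly, with timings taken from the constants above:

```ruby
# Rough call sequence of the Prometheus update flow introduced here:
#
#   Clusters::Applications::ScheduleUpdateService#execute
#     -> ClusterUpdateAppWorker (immediately, or after the 2 minute backoff)
#        -> Clusters::Applications::PrometheusUpdateService#execute
#           -> Clusters::Applications::PrometheusConfigService#execute   # build Helm values
#           -> helm_api.update(patch_command(values))
#           -> ClusterWaitForAppUpdateWorker.perform_in(10.seconds, ...)
#              -> Clusters::Applications::CheckUpgradeProgressService#execute
#                 # re-checks the patch pod every 10s until success, failure,
#                 # or the 20 minute timeout
```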
diff --git a/changelogs/unreleased/fix_logic_for_ingress_can_uninstall.yml b/changelogs/unreleased/fix_logic_for_ingress_can_uninstall.yml
new file mode 100644
index 00000000000..64a7b22a62e
--- /dev/null
+++ b/changelogs/unreleased/fix_logic_for_ingress_can_uninstall.yml
@@ -0,0 +1,5 @@
+---
+title: Fix logic for ingress can_uninstall?
+merge_request: 27729
+author:
+type: fixed
diff --git a/changelogs/unreleased/pokstad1-praefect-docs-reconcile-subcmd.yml b/changelogs/unreleased/pokstad1-praefect-docs-reconcile-subcmd.yml
new file mode 100644
index 00000000000..2c20f425d80
--- /dev/null
+++ b/changelogs/unreleased/pokstad1-praefect-docs-reconcile-subcmd.yml
@@ -0,0 +1,5 @@
+---
+title: Update Gitaly to 12.9.0-rc5
+merge_request: 27631
+author:
+type: added
diff --git a/db/migrate/20200214025454_add_canonical_emails.rb b/db/migrate/20200214025454_add_canonical_emails.rb
new file mode 100644
index 00000000000..80cf535abfa
--- /dev/null
+++ b/db/migrate/20200214025454_add_canonical_emails.rb
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+class AddCanonicalEmails < ActiveRecord::Migration[6.0]
+ include Gitlab::Database::MigrationHelpers
+
+ DOWNTIME = false
+
+ def up
+ with_lock_retries do
+ create_table :user_canonical_emails do |t|
+ t.timestamps_with_timezone
+ t.references :user, index: false, null: false, foreign_key: { on_delete: :cascade }
+ t.string :canonical_email, null: false, index: true # rubocop:disable Migration/AddLimitToStringColumns
+ end
+ end
+
+ add_index :user_canonical_emails, [:user_id, :canonical_email], unique: true
+ add_index :user_canonical_emails, :user_id, unique: true
+ end
+
+ def down
+ with_lock_retries do
+ drop_table(:user_canonical_emails)
+ end
+ end
+end
diff --git a/db/structure.sql b/db/structure.sql
index 086f2bba6d1..ba03603a7a8 100644
--- a/db/structure.sql
+++ b/db/structure.sql
@@ -6110,6 +6110,23 @@ CREATE SEQUENCE public.user_callouts_id_seq
ALTER SEQUENCE public.user_callouts_id_seq OWNED BY public.user_callouts.id;
+CREATE TABLE public.user_canonical_emails (
+ id bigint NOT NULL,
+ created_at timestamp with time zone NOT NULL,
+ updated_at timestamp with time zone NOT NULL,
+ user_id bigint NOT NULL,
+ canonical_email character varying NOT NULL
+);
+
+CREATE SEQUENCE public.user_canonical_emails_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+ALTER SEQUENCE public.user_canonical_emails_id_seq OWNED BY public.user_canonical_emails.id;
+
CREATE TABLE public.user_custom_attributes (
id integer NOT NULL,
created_at timestamp without time zone NOT NULL,
@@ -7302,6 +7319,8 @@ ALTER TABLE ONLY public.user_agent_details ALTER COLUMN id SET DEFAULT nextval('
ALTER TABLE ONLY public.user_callouts ALTER COLUMN id SET DEFAULT nextval('public.user_callouts_id_seq'::regclass);
+ALTER TABLE ONLY public.user_canonical_emails ALTER COLUMN id SET DEFAULT nextval('public.user_canonical_emails_id_seq'::regclass);
+
ALTER TABLE ONLY public.user_custom_attributes ALTER COLUMN id SET DEFAULT nextval('public.user_custom_attributes_id_seq'::regclass);
ALTER TABLE ONLY public.user_details ALTER COLUMN user_id SET DEFAULT nextval('public.user_details_user_id_seq'::regclass);
@@ -8206,6 +8225,9 @@ ALTER TABLE ONLY public.user_agent_details
ALTER TABLE ONLY public.user_callouts
ADD CONSTRAINT user_callouts_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY public.user_canonical_emails
+ ADD CONSTRAINT user_canonical_emails_pkey PRIMARY KEY (id);
+
ALTER TABLE ONLY public.user_custom_attributes
ADD CONSTRAINT user_custom_attributes_pkey PRIMARY KEY (id);
@@ -9963,6 +9985,12 @@ CREATE INDEX index_user_callouts_on_user_id ON public.user_callouts USING btree
CREATE UNIQUE INDEX index_user_callouts_on_user_id_and_feature_name ON public.user_callouts USING btree (user_id, feature_name);
+CREATE INDEX index_user_canonical_emails_on_canonical_email ON public.user_canonical_emails USING btree (canonical_email);
+
+CREATE UNIQUE INDEX index_user_canonical_emails_on_user_id ON public.user_canonical_emails USING btree (user_id);
+
+CREATE UNIQUE INDEX index_user_canonical_emails_on_user_id_and_canonical_email ON public.user_canonical_emails USING btree (user_id, canonical_email);
+
CREATE INDEX index_user_custom_attributes_on_key_and_value ON public.user_custom_attributes USING btree (key, value);
CREATE UNIQUE INDEX index_user_custom_attributes_on_user_id_and_key ON public.user_custom_attributes USING btree (user_id, key);
@@ -11484,6 +11512,9 @@ ALTER TABLE ONLY public.labels
ALTER TABLE ONLY public.project_feature_usages
ADD CONSTRAINT fk_rails_c22a50024b FOREIGN KEY (project_id) REFERENCES public.projects(id) ON DELETE CASCADE;
+ALTER TABLE ONLY public.user_canonical_emails
+ ADD CONSTRAINT fk_rails_c2bd828b51 FOREIGN KEY (user_id) REFERENCES public.users(id) ON DELETE CASCADE;
+
ALTER TABLE ONLY public.project_repositories
ADD CONSTRAINT fk_rails_c3258dc63b FOREIGN KEY (shard_id) REFERENCES public.shards(id) ON DELETE RESTRICT;
@@ -12614,6 +12645,7 @@ INSERT INTO "schema_migrations" (version) VALUES
('20200213204737'),
('20200213220159'),
('20200213220211'),
+('20200214025454'),
('20200214034836'),
('20200214085940'),
('20200214214934'),
diff --git a/doc/administration/gitaly/praefect.md b/doc/administration/gitaly/praefect.md
index 8c766774868..5276e3a7816 100644
--- a/doc/administration/gitaly/praefect.md
+++ b/doc/administration/gitaly/praefect.md
@@ -682,6 +682,29 @@ for example behind a load balancer, `failover_enabled` should be disabled. The r
is no coordination that currently happens across different Praefect instances, so there could be a situation where
two Praefect instances think two different Gitaly nodes are the primary.
+## Backend Node Recovery
+
+When a Praefect backend node fails and is no longer able to
+replicate changes, the backend node will start to drift from the primary. If
+that node eventually recovers, it will need to be reconciled with the current
+primary. The primary node is considered the single source of truth for the
+state of a shard. The Praefect `reconcile` subcommand allows for the manual
+reconciliation between a backend node and the current primary.
+
+Run the following command on the Praefect server after all placeholders
+(`<virtual-storage>` and `<target-storage>`) have been replaced:
+
+```shell
+sudo /opt/gitlab/embedded/bin/praefect -config /var/opt/gitlab/praefect/config.toml reconcile -virtual <virtual-storage> -target <target-storage>
+```
+
+- Replace the placeholder `<virtual-storage>` with the virtual storage containing the backend node storage to be checked.
+- Replace the placeholder `<target-storage>` with the backend storage name.
+
+The command will return a list of repositories that were found to be
+inconsistent against the current primary. Each of these inconsistencies will
+also be logged with an accompanying replication job ID.
+
## Grafana
Grafana is included with GitLab, and can be used to monitor your Praefect
diff --git a/doc/user/project/merge_requests/test_coverage_visualization.md b/doc/user/project/merge_requests/test_coverage_visualization.md
index a0a4c5d3743..37b8b8c7b02 100644
--- a/doc/user/project/merge_requests/test_coverage_visualization.md
+++ b/doc/user/project/merge_requests/test_coverage_visualization.md
@@ -67,7 +67,7 @@ test:
This feature comes with the `:coverage_report_view` feature flag disabled by
default. This feature is disabled due to some performance issues with very large
-data sets. When [the performance issue](https://gitlab.com/gitlab-org/gitlab/issues/37725)
+data sets. When [the performance issue](https://gitlab.com/gitlab-org/gitlab/issues/211410)
is resolved, the feature will be enabled by default.
To enable this feature, ask a GitLab administrator with Rails console access to
diff --git a/spec/factories/user_canonical_emails.rb b/spec/factories/user_canonical_emails.rb
new file mode 100644
index 00000000000..0161d25c525
--- /dev/null
+++ b/spec/factories/user_canonical_emails.rb
@@ -0,0 +1,8 @@
+# frozen_string_literal: true
+
+FactoryBot.define do
+ factory :user_canonical_email do
+ user
+ canonical_email { user.email }
+ end
+end
diff --git a/spec/frontend/notes/components/note_app_spec.js b/spec/frontend/notes/components/notes_app_spec.js
index 2d0cca18647..2d0cca18647 100644
--- a/spec/frontend/notes/components/note_app_spec.js
+++ b/spec/frontend/notes/components/notes_app_spec.js
diff --git a/spec/javascripts/u2f/authenticate_spec.js b/spec/frontend/u2f/authenticate_spec.js
index 8f9cb270729..1d39c4857ae 100644
--- a/spec/javascripts/u2f/authenticate_spec.js
+++ b/spec/frontend/u2f/authenticate_spec.js
@@ -3,15 +3,19 @@ import U2FAuthenticate from '~/u2f/authenticate';
import 'vendor/u2f';
import MockU2FDevice from './mock_u2f_device';
-describe('U2FAuthenticate', function() {
+describe('U2FAuthenticate', () => {
+ let u2fDevice;
+ let container;
+ let component;
+
preloadFixtures('u2f/authenticate.html');
beforeEach(() => {
loadFixtures('u2f/authenticate.html');
- this.u2fDevice = new MockU2FDevice();
- this.container = $('#js-authenticate-u2f');
- this.component = new U2FAuthenticate(
- this.container,
+ u2fDevice = new MockU2FDevice();
+ container = $('#js-authenticate-u2f');
+ component = new U2FAuthenticate(
+ container,
'#js-login-u2f-form',
{
sign_requests: [],
@@ -22,21 +26,23 @@ describe('U2FAuthenticate', function() {
});
describe('with u2f unavailable', () => {
+ let oldu2f;
+
beforeEach(() => {
- spyOn(this.component, 'switchToFallbackUI');
- this.oldu2f = window.u2f;
+ jest.spyOn(component, 'switchToFallbackUI').mockImplementation(() => {});
+ oldu2f = window.u2f;
window.u2f = null;
});
afterEach(() => {
- window.u2f = this.oldu2f;
+ window.u2f = oldu2f;
});
it('falls back to normal 2fa', done => {
- this.component
+ component
.start()
.then(() => {
- expect(this.component.switchToFallbackUI).toHaveBeenCalled();
+ expect(component.switchToFallbackUI).toHaveBeenCalled();
done();
})
.catch(done.fail);
@@ -46,54 +52,55 @@ describe('U2FAuthenticate', function() {
describe('with u2f available', () => {
beforeEach(done => {
// bypass automatic form submission within renderAuthenticated
- spyOn(this.component, 'renderAuthenticated').and.returnValue(true);
- this.u2fDevice = new MockU2FDevice();
+ jest.spyOn(component, 'renderAuthenticated').mockReturnValue(true);
+ u2fDevice = new MockU2FDevice();
- this.component
+ component
.start()
.then(done)
.catch(done.fail);
});
it('allows authenticating via a U2F device', () => {
- const inProgressMessage = this.container.find('p');
+ const inProgressMessage = container.find('p');
expect(inProgressMessage.text()).toContain('Trying to communicate with your device');
- this.u2fDevice.respondToAuthenticateRequest({
+ u2fDevice.respondToAuthenticateRequest({
deviceData: 'this is data from the device',
});
- expect(this.component.renderAuthenticated).toHaveBeenCalledWith(
+ expect(component.renderAuthenticated).toHaveBeenCalledWith(
'{"deviceData":"this is data from the device"}',
);
});
describe('errors', () => {
it('displays an error message', () => {
- const setupButton = this.container.find('#js-login-u2f-device');
+ const setupButton = container.find('#js-login-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToAuthenticateRequest({
+ u2fDevice.respondToAuthenticateRequest({
errorCode: 'error!',
});
- const errorMessage = this.container.find('p');
+ const errorMessage = container.find('p');
expect(errorMessage.text()).toContain('There was a problem communicating with your device');
});
- return it('allows retrying authentication after an error', () => {
- let setupButton = this.container.find('#js-login-u2f-device');
+
+ it('allows retrying authentication after an error', () => {
+ let setupButton = container.find('#js-login-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToAuthenticateRequest({
+ u2fDevice.respondToAuthenticateRequest({
errorCode: 'error!',
});
- const retryButton = this.container.find('#js-u2f-try-again');
+ const retryButton = container.find('#js-u2f-try-again');
retryButton.trigger('click');
- setupButton = this.container.find('#js-login-u2f-device');
+ setupButton = container.find('#js-login-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToAuthenticateRequest({
+ u2fDevice.respondToAuthenticateRequest({
deviceData: 'this is data from the device',
});
- expect(this.component.renderAuthenticated).toHaveBeenCalledWith(
+ expect(component.renderAuthenticated).toHaveBeenCalledWith(
'{"deviceData":"this is data from the device"}',
);
});
diff --git a/spec/javascripts/u2f/mock_u2f_device.js b/spec/frontend/u2f/mock_u2f_device.js
index ec8425a4e3e..ec8425a4e3e 100644
--- a/spec/javascripts/u2f/mock_u2f_device.js
+++ b/spec/frontend/u2f/mock_u2f_device.js
diff --git a/spec/javascripts/u2f/register_spec.js b/spec/frontend/u2f/register_spec.js
index a75ceca9f4c..a4395a2123a 100644
--- a/spec/javascripts/u2f/register_spec.js
+++ b/spec/frontend/u2f/register_spec.js
@@ -3,33 +3,37 @@ import U2FRegister from '~/u2f/register';
import 'vendor/u2f';
import MockU2FDevice from './mock_u2f_device';
-describe('U2FRegister', function() {
+describe('U2FRegister', () => {
+ let u2fDevice;
+ let container;
+ let component;
+
preloadFixtures('u2f/register.html');
beforeEach(done => {
loadFixtures('u2f/register.html');
- this.u2fDevice = new MockU2FDevice();
- this.container = $('#js-register-u2f');
- this.component = new U2FRegister(this.container, $('#js-register-u2f-templates'), {}, 'token');
- this.component
+ u2fDevice = new MockU2FDevice();
+ container = $('#js-register-u2f');
+ component = new U2FRegister(container, $('#js-register-u2f-templates'), {}, 'token');
+ component
.start()
.then(done)
.catch(done.fail);
});
it('allows registering a U2F device', () => {
- const setupButton = this.container.find('#js-setup-u2f-device');
+ const setupButton = container.find('#js-setup-u2f-device');
expect(setupButton.text()).toBe('Set up new U2F device');
setupButton.trigger('click');
- const inProgressMessage = this.container.children('p');
+ const inProgressMessage = container.children('p');
expect(inProgressMessage.text()).toContain('Trying to communicate with your device');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
deviceData: 'this is data from the device',
});
- const registeredMessage = this.container.find('p');
- const deviceResponse = this.container.find('#js-device-response');
+ const registeredMessage = container.find('p');
+ const deviceResponse = container.find('#js-device-response');
expect(registeredMessage.text()).toContain('Your device was successfully set up!');
expect(deviceResponse.val()).toBe('{"deviceData":"this is data from the device"}');
@@ -37,41 +41,41 @@ describe('U2FRegister', function() {
describe('errors', () => {
it("doesn't allow the same device to be registered twice (for the same user", () => {
- const setupButton = this.container.find('#js-setup-u2f-device');
+ const setupButton = container.find('#js-setup-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
errorCode: 4,
});
- const errorMessage = this.container.find('p');
+ const errorMessage = container.find('p');
expect(errorMessage.text()).toContain('already been registered with us');
});
it('displays an error message for other errors', () => {
- const setupButton = this.container.find('#js-setup-u2f-device');
+ const setupButton = container.find('#js-setup-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
errorCode: 'error!',
});
- const errorMessage = this.container.find('p');
+ const errorMessage = container.find('p');
expect(errorMessage.text()).toContain('There was a problem communicating with your device');
});
it('allows retrying registration after an error', () => {
- let setupButton = this.container.find('#js-setup-u2f-device');
+ let setupButton = container.find('#js-setup-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
errorCode: 'error!',
});
- const retryButton = this.container.find('#U2FTryAgain');
+ const retryButton = container.find('#U2FTryAgain');
retryButton.trigger('click');
- setupButton = this.container.find('#js-setup-u2f-device');
+ setupButton = container.find('#js-setup-u2f-device');
setupButton.trigger('click');
- this.u2fDevice.respondToRegisterRequest({
+ u2fDevice.respondToRegisterRequest({
deviceData: 'this is data from the device',
});
- const registeredMessage = this.container.find('p');
+ const registeredMessage = container.find('p');
expect(registeredMessage.text()).toContain('Your device was successfully set up!');
});
diff --git a/spec/models/clusters/applications/ingress_spec.rb b/spec/models/clusters/applications/ingress_spec.rb
index ba5f48ce6b3..64d667f40f6 100644
--- a/spec/models/clusters/applications/ingress_spec.rb
+++ b/spec/models/clusters/applications/ingress_spec.rb
@@ -21,26 +21,60 @@ describe Clusters::Applications::Ingress do
describe '#can_uninstall?' do
subject { ingress.can_uninstall? }
- it 'returns true if external ip is set and no application exists' do
- ingress.external_ip = 'IP'
+ context 'with jupyter installed' do
+ before do
+ create(:clusters_applications_jupyter, :installed, cluster: ingress.cluster)
+ end
- is_expected.to be_truthy
- end
+ it 'returns false if external_ip_or_hostname? is true' do
+ ingress.external_ip = 'IP'
- it 'returns false if application_jupyter_nil_or_installable? is false' do
- create(:clusters_applications_jupyter, :installed, cluster: ingress.cluster)
+ is_expected.to be_falsey
+ end
- is_expected.to be_falsey
+ it 'returns false if external_ip_or_hostname? is false' do
+ is_expected.to be_falsey
+ end
end
- it 'returns false if application_elastic_stack_nil_or_installable? is false' do
- create(:clusters_applications_elastic_stack, :installed, cluster: ingress.cluster)
+ context 'with jupyter installable' do
+ before do
+ create(:clusters_applications_jupyter, :installable, cluster: ingress.cluster)
+ end
+
+ it 'returns true if external_ip_or_hostname? is true' do
+ ingress.external_ip = 'IP'
+
+ is_expected.to be_truthy
+ end
- is_expected.to be_falsey
+ it 'returns false if external_ip_or_hostname? is false' do
+ is_expected.to be_falsey
+ end
end
- it 'returns false if external_ip_or_hostname? is false' do
- is_expected.to be_falsey
+ context 'with jupyter nil' do
+ it 'returns false if external_ip_or_hostname? is false' do
+ is_expected.to be_falsey
+ end
+
+ context 'if external_ip_or_hostname? is true' do
+ context 'with IP' do
+ before do
+ ingress.external_ip = 'IP'
+ end
+
+ it { is_expected.to be_truthy }
+ end
+
+ context 'with hostname' do
+ before do
+ ingress.external_hostname = 'example.com'
+ end
+
+ it { is_expected.to be_truthy }
+ end
+ end
end
end
diff --git a/spec/models/clusters/applications/prometheus_spec.rb b/spec/models/clusters/applications/prometheus_spec.rb
index ecb87910d2d..ce341e67c14 100644
--- a/spec/models/clusters/applications/prometheus_spec.rb
+++ b/spec/models/clusters/applications/prometheus_spec.rb
@@ -39,6 +39,19 @@ describe Clusters::Applications::Prometheus do
end
end
+ describe 'transition to updating' do
+ let(:project) { create(:project) }
+ let(:cluster) { create(:cluster, projects: [project]) }
+
+ subject { create(:clusters_applications_prometheus, :installed, cluster: cluster) }
+
+ it 'sets last_update_started_at to now' do
+ Timecop.freeze do
+ expect { subject.make_updating }.to change { subject.reload.last_update_started_at }.to be_within(1.second).of(Time.now)
+ end
+ end
+ end
+
describe '#can_uninstall?' do
let(:prometheus) { create(:clusters_applications_prometheus) }
@@ -331,6 +344,38 @@ describe Clusters::Applications::Prometheus do
end
end
+ describe '#updated_since?' do
+ let(:cluster) { create(:cluster) }
+ let(:prometheus_app) { build(:clusters_applications_prometheus, cluster: cluster) }
+ let(:timestamp) { Time.now - 5.minutes }
+
+ around do |example|
+ Timecop.freeze { example.run }
+ end
+
+ before do
+ prometheus_app.last_update_started_at = Time.now
+ end
+
+ context 'when app does not have status failed' do
+ it 'returns true when last update started after the timestamp' do
+ expect(prometheus_app.updated_since?(timestamp)).to be true
+ end
+
+ it 'returns false when last update started before the timestamp' do
+ expect(prometheus_app.updated_since?(Time.now + 5.minutes)).to be false
+ end
+ end
+
+ context 'when app has status failed' do
+ it 'returns false when last update started after the timestamp' do
+ prometheus_app.status = 6
+
+ expect(prometheus_app.updated_since?(timestamp)).to be false
+ end
+ end
+ end
+
describe 'alert manager token' do
subject { create(:clusters_applications_prometheus) }
diff --git a/spec/models/project_spec.rb b/spec/models/project_spec.rb
index ceb6382eb6c..f0423937710 100644
--- a/spec/models/project_spec.rb
+++ b/spec/models/project_spec.rb
@@ -5938,6 +5938,24 @@ describe Project do
end
end
+ describe '#environments_for_scope' do
+ let_it_be(:project, reload: true) { create(:project) }
+
+ before do
+ create_list(:environment, 2, project: project)
+ end
+
+ it 'retrieves all project environments when using the * wildcard' do
+ expect(project.environments_for_scope("*")).to eq(project.environments)
+ end
+
+ it 'retrieves a specific project environment when using the name of that environment' do
+ environment = project.environments.first
+
+ expect(project.environments_for_scope(environment.name)).to eq([environment])
+ end
+ end
+
def finish_job(export_job)
export_job.start
export_job.finish
diff --git a/spec/models/user_canonical_email_spec.rb b/spec/models/user_canonical_email_spec.rb
new file mode 100644
index 00000000000..54a4e968033
--- /dev/null
+++ b/spec/models/user_canonical_email_spec.rb
@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe UserCanonicalEmail do
+ it { is_expected.to belong_to(:user) }
+
+ describe 'validations' do
+ describe 'canonical_email' do
+ it { is_expected.to validate_presence_of(:canonical_email) }
+
+ it 'validates email address', :aggregate_failures do
+ expect(build(:user_canonical_email, canonical_email: 'nonsense')).not_to be_valid
+ expect(build(:user_canonical_email, canonical_email: '@nonsense')).not_to be_valid
+ expect(build(:user_canonical_email, canonical_email: '@nonsense@')).not_to be_valid
+ expect(build(:user_canonical_email, canonical_email: 'nonsense@')).not_to be_valid
+ end
+ end
+ end
+end
diff --git a/spec/services/clusters/applications/check_upgrade_progress_service_spec.rb b/spec/services/clusters/applications/check_upgrade_progress_service_spec.rb
new file mode 100644
index 00000000000..c08b618fe6a
--- /dev/null
+++ b/spec/services/clusters/applications/check_upgrade_progress_service_spec.rb
@@ -0,0 +1,94 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Applications::CheckUpgradeProgressService do
+ RESCHEDULE_PHASES = ::Gitlab::Kubernetes::Pod::PHASES -
+ [::Gitlab::Kubernetes::Pod::SUCCEEDED, ::Gitlab::Kubernetes::Pod::FAILED].freeze
+
+ let(:application) { create(:clusters_applications_prometheus, :updating) }
+ let(:service) { described_class.new(application) }
+ let(:phase) { ::Gitlab::Kubernetes::Pod::UNKNOWN }
+ let(:errors) { nil }
+
+ shared_examples 'a terminated upgrade' do
+ it 'removes the POD' do
+ expect(service).to receive(:remove_pod).once
+
+ service.execute
+ end
+ end
+
+ shared_examples 'a not yet terminated upgrade' do |a_phase|
+ let(:phase) { a_phase }
+
+ context "when phase is #{a_phase}" do
+ context 'when not timed out' do
+ it 'reschedule a new check' do
+ expect(::ClusterWaitForAppUpdateWorker).to receive(:perform_in).once
+ expect(service).not_to receive(:remove_pod)
+
+ service.execute
+
+ expect(application).to be_updating
+ expect(application.status_reason).to be_nil
+ end
+ end
+
+ context 'when timed out' do
+ let(:application) { create(:clusters_applications_prometheus, :timed_out, :updating) }
+
+ it_behaves_like 'a terminated upgrade'
+
+ it 'make the application update errored' do
+ expect(::ClusterWaitForAppUpdateWorker).not_to receive(:perform_in)
+
+ service.execute
+
+ expect(application).to be_update_errored
+ expect(application.status_reason).to eq("Update timed out")
+ end
+ end
+ end
+ end
+
+ before do
+ allow(service).to receive(:phase).once.and_return(phase)
+
+ allow(service).to receive(:errors).and_return(errors)
+ allow(service).to receive(:remove_pod).and_return(nil)
+ end
+
+ describe '#execute' do
+ context 'when upgrade pod succeeded' do
+ let(:phase) { ::Gitlab::Kubernetes::Pod::SUCCEEDED }
+
+ it_behaves_like 'a terminated upgrade'
+
+ it 'make the application upgraded' do
+ expect(::ClusterWaitForAppUpdateWorker).not_to receive(:perform_in)
+
+ service.execute
+
+ expect(application).to be_updated
+ expect(application.status_reason).to be_nil
+ end
+ end
+
+ context 'when upgrade pod failed' do
+ let(:phase) { ::Gitlab::Kubernetes::Pod::FAILED }
+ let(:errors) { 'test installation failed' }
+
+ it_behaves_like 'a terminated upgrade'
+
+ it 'make the application update errored' do
+ service.execute
+
+ expect(application).to be_update_errored
+ expect(application.status_reason).to eq(errors)
+ end
+ end
+
+ RESCHEDULE_PHASES.each { |phase| it_behaves_like 'a not yet terminated upgrade', phase }
+ end
+end
diff --git a/spec/services/clusters/applications/prometheus_config_service_spec.rb b/spec/services/clusters/applications/prometheus_config_service_spec.rb
new file mode 100644
index 00000000000..993a697b543
--- /dev/null
+++ b/spec/services/clusters/applications/prometheus_config_service_spec.rb
@@ -0,0 +1,158 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Applications::PrometheusConfigService do
+ include Gitlab::Routing.url_helpers
+
+ let_it_be(:project) { create(:project) }
+ let_it_be(:production) { create(:environment, project: project) }
+ let_it_be(:cluster) { create(:cluster, :provided_by_user, projects: [project]) }
+
+ let(:application) do
+ create(:clusters_applications_prometheus, :installed, cluster: cluster)
+ end
+
+ subject { described_class.new(project, cluster, application).execute(input) }
+
+ describe '#execute' do
+ let(:input) do
+ YAML.load_file(Rails.root.join('vendor/prometheus/values.yaml'))
+ end
+
+ context 'with alerts' do
+ let!(:alert) do
+ create(:prometheus_alert, project: project, environment: production)
+ end
+
+ it 'enables alertmanager' do
+ expect(subject.dig('alertmanager', 'enabled')).to eq(true)
+ end
+
+ describe 'alertmanagerFiles' do
+ let(:alertmanager) do
+ subject.dig('alertmanagerFiles', 'alertmanager.yml')
+ end
+
+ it 'contains receivers and route' do
+ expect(alertmanager.keys).to contain_exactly('receivers', 'route')
+ end
+
+ describe 'receivers' do
+ let(:receiver) { alertmanager.dig('receivers', 0) }
+ let(:webhook_config) { receiver.dig('webhook_configs', 0) }
+
+ let(:notify_url) do
+ notify_project_prometheus_alerts_url(project, format: :json)
+ end
+
+ it 'sets receiver' do
+ expect(receiver['name']).to eq('gitlab')
+ end
+
+ it 'sets webhook_config' do
+ expect(webhook_config).to eq(
+ 'url' => notify_url,
+ 'send_resolved' => true,
+ 'http_config' => {
+ 'bearer_token' => application.alert_manager_token
+ }
+ )
+ end
+ end
+
+ describe 'route' do
+ let(:route) { alertmanager.fetch('route') }
+
+ it 'sets route' do
+ expect(route).to eq(
+ 'receiver' => 'gitlab',
+ 'group_wait' => '30s',
+ 'group_interval' => '5m',
+ 'repeat_interval' => '4h'
+ )
+ end
+ end
+ end
+
+ describe 'serverFiles' do
+ let(:groups) { subject.dig('serverFiles', 'alerts', 'groups') }
+
+ it 'sets the alerts' do
+ rules = groups.dig(0, 'rules')
+ expect(rules.size).to eq(1)
+
+ expect(rules.first['alert']).to eq(alert.title)
+ end
+
+ context 'with parameterized queries' do
+ let!(:alert) do
+ create(:prometheus_alert,
+ project: project,
+ environment: production,
+ prometheus_metric: metric)
+ end
+
+ let(:metric) do
+ create(:prometheus_metric, query: query, project: project)
+ end
+
+ let(:query) { '%{ci_environment_slug}' }
+
+ it 'substitutes query variables' do
+ expect(Gitlab::Prometheus::QueryVariables)
+ .to receive(:call)
+ .with(production)
+ .and_call_original
+
+ expr = groups.dig(0, 'rules', 0, 'expr')
+ expect(expr).to include(production.name)
+ end
+ end
+
+ context 'with multiple environments' do
+ let(:staging) { create(:environment, project: project) }
+
+ before do
+ create(:prometheus_alert, project: project, environment: production)
+ create(:prometheus_alert, project: project, environment: staging)
+ end
+
+ it 'sets alerts for multiple environment' do
+ env_names = groups.map { |group| group['name'] }
+ expect(env_names).to contain_exactly(
+ "#{production.name}.rules",
+ "#{staging.name}.rules"
+ )
+ end
+
+ it 'substitutes query variables once per environment' do
+ expect(Gitlab::Prometheus::QueryVariables)
+ .to receive(:call)
+ .with(production)
+
+ expect(Gitlab::Prometheus::QueryVariables)
+ .to receive(:call)
+ .with(staging)
+
+ subject
+ end
+ end
+ end
+ end
+
+ context 'without alerts' do
+ it 'disables alertmanager' do
+ expect(subject.dig('alertmanager', 'enabled')).to eq(false)
+ end
+
+ it 'removes alertmanagerFiles' do
+ expect(subject).not_to include('alertmanagerFiles')
+ end
+
+ it 'removes alerts' do
+ expect(subject.dig('serverFiles', 'alerts')).to eq({})
+ end
+ end
+ end
+end
diff --git a/spec/services/clusters/applications/prometheus_update_service_spec.rb b/spec/services/clusters/applications/prometheus_update_service_spec.rb
new file mode 100644
index 00000000000..078b01d2777
--- /dev/null
+++ b/spec/services/clusters/applications/prometheus_update_service_spec.rb
@@ -0,0 +1,92 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Applications::PrometheusUpdateService do
+ describe '#execute' do
+ let(:project) { create(:project) }
+ let(:environment) { create(:environment, project: project) }
+ let(:cluster) { create(:cluster, :provided_by_user, :with_installed_helm, projects: [project]) }
+ let(:application) { create(:clusters_applications_prometheus, :installed, cluster: cluster) }
+ let(:empty_alerts_values_update_yaml) { "---\nalertmanager:\n enabled: false\nserverFiles:\n alerts: {}\n" }
+ let!(:patch_command) { application.patch_command(empty_alerts_values_update_yaml) }
+ let(:helm_client) { instance_double(::Gitlab::Kubernetes::Helm::API) }
+
+ subject(:service) { described_class.new(application, project) }
+
+ before do
+ allow(service).to receive(:patch_command).with(empty_alerts_values_update_yaml).and_return(patch_command)
+ allow(service).to receive(:helm_api).and_return(helm_client)
+ end
+
+ context 'when there are no errors' do
+ before do
+ expect(helm_client).to receive(:update).with(patch_command)
+
+ allow(::ClusterWaitForAppUpdateWorker)
+ .to receive(:perform_in)
+ .and_return(nil)
+ end
+
+ it 'make the application updating' do
+ expect(application.cluster).not_to be_nil
+
+ service.execute
+
+ expect(application).to be_updating
+ end
+
+ it 'updates current config' do
+ prometheus_config_service = spy(:prometheus_config_service)
+
+ expect(Clusters::Applications::PrometheusConfigService)
+ .to receive(:new)
+ .with(project, cluster, application)
+ .and_return(prometheus_config_service)
+
+ expect(prometheus_config_service)
+ .to receive(:execute)
+ .and_return(YAML.safe_load(empty_alerts_values_update_yaml))
+
+ service.execute
+ end
+
+ it 'schedules async update status check' do
+ expect(::ClusterWaitForAppUpdateWorker).to receive(:perform_in).once
+
+ service.execute
+ end
+ end
+
+ context 'when k8s cluster communication fails' do
+ before do
+ error = ::Kubeclient::HttpError.new(500, 'system failure', nil)
+ allow(helm_client).to receive(:update).and_raise(error)
+ end
+
+ it 'make the application update errored' do
+ service.execute
+
+ expect(application).to be_update_errored
+ expect(application.status_reason).to match(/kubernetes error:/i)
+ end
+ end
+
+ context 'when application cannot be persisted' do
+ let(:application) { build(:clusters_applications_prometheus, :installed) }
+
+ before do
+ allow(application).to receive(:make_updating!).once
+ .and_raise(ActiveRecord::RecordInvalid.new(application))
+ end
+
+ it 'make the application update errored' do
+ expect(helm_client).not_to receive(:update)
+
+ service.execute
+
+ expect(application).to be_update_errored
+ end
+ end
+ end
+end
diff --git a/spec/services/clusters/applications/schedule_update_service_spec.rb b/spec/services/clusters/applications/schedule_update_service_spec.rb
new file mode 100644
index 00000000000..0764f5b6a97
--- /dev/null
+++ b/spec/services/clusters/applications/schedule_update_service_spec.rb
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Clusters::Applications::ScheduleUpdateService do
+ describe '#execute' do
+ let(:project) { create(:project) }
+
+ around do |example|
+ Timecop.freeze { example.run }
+ end
+
+ context 'when application is able to be updated' do
+ context 'when the application was recently scheduled' do
+ it 'schedules worker with a backoff delay' do
+ application = create(:clusters_applications_prometheus, :installed, last_update_started_at: Time.now + 5.minutes)
+ service = described_class.new(application, project)
+
+ expect(::ClusterUpdateAppWorker).to receive(:perform_in).with(described_class::BACKOFF_DELAY, application.name, application.id, project.id, Time.now).once
+
+ service.execute
+ end
+ end
+
+ context 'when the application has not been recently updated' do
+ it 'schedules worker' do
+ application = create(:clusters_applications_prometheus, :installed)
+ service = described_class.new(application, project)
+
+ expect(::ClusterUpdateAppWorker).to receive(:perform_async).with(application.name, application.id, project.id, Time.now).once
+
+ service.execute
+ end
+ end
+ end
+ end
+end
diff --git a/spec/services/users/build_service_spec.rb b/spec/services/users/build_service_spec.rb
index aed5d2598ef..146819c7f44 100644
--- a/spec/services/users/build_service_spec.rb
+++ b/spec/services/users/build_service_spec.rb
@@ -16,6 +16,14 @@ describe Users::BuildService do
expect(service.execute).to be_valid
end
+ context 'calls the UpdateCanonicalEmailService' do
+ specify do
+ expect(Users::UpdateCanonicalEmailService).to receive(:new).and_call_original
+
+ service.execute
+ end
+ end
+
context 'allowed params' do
let(:params) do
{
diff --git a/spec/services/users/create_service_spec.rb b/spec/services/users/create_service_spec.rb
index a139dc01314..c783a1403df 100644
--- a/spec/services/users/create_service_spec.rb
+++ b/spec/services/users/create_service_spec.rb
@@ -8,10 +8,11 @@ describe Users::CreateService do
context 'with an admin user' do
let(:service) { described_class.new(admin_user, params) }
+ let(:email) { 'jd@example.com' }
context 'when required parameters are provided' do
let(:params) do
- { name: 'John Doe', username: 'jduser', email: 'jd@example.com', password: 'mydummypass' }
+ { name: 'John Doe', username: 'jduser', email: email, password: 'mydummypass' }
end
it 'returns a persisted user' do
diff --git a/spec/services/users/update_canonical_email_service_spec.rb b/spec/services/users/update_canonical_email_service_spec.rb
new file mode 100644
index 00000000000..68ba1b75b6c
--- /dev/null
+++ b/spec/services/users/update_canonical_email_service_spec.rb
@@ -0,0 +1,116 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Users::UpdateCanonicalEmailService do
+ let(:other_email) { "differentaddress@includeddomain.com" }
+
+ before do
+ stub_const("Users::UpdateCanonicalEmailService::INCLUDED_DOMAINS_PATTERN", [/includeddomain/])
+ end
+
+ describe '#initialize' do
+ context 'unsuccessful' do
+ it 'raises an error if there is no user' do
+ expect { described_class.new(user: nil) }.to raise_error(ArgumentError, /Please provide a user/)
+ end
+
+ it 'raises an error if the object is not a User' do
+ expect { described_class.new(user: 123) }.to raise_error(ArgumentError, /Please provide a user/)
+ end
+ end
+
+ context 'when a user is provided' do
+ it 'does not error' do
+ user = build(:user)
+
+ expect { described_class.new(user: user) }.not_to raise_error
+ end
+ end
+ end
+
+ describe "#canonicalize_email" do
+ let(:user) { build(:user) }
+ let(:subject) { described_class.new(user: user) }
+
+ context 'when the email domain is included' do
+    context 'strips any . and anything after + from the local part for included domains' do
+ using RSpec::Parameterized::TableSyntax
+
+ let(:expected_result) { 'user@includeddomain.com' }
+
+ where(:raw_email, :expected_result) do
+ 'user@includeddomain.com' | 'user@includeddomain.com'
+ 'u.s.e.r@includeddomain.com' | 'user@includeddomain.com'
+ 'user+123@includeddomain.com' | 'user@includeddomain.com'
+ 'us.er+123@includeddomain.com' | 'user@includeddomain.com'
+ end
+
+ with_them do
+ before do
+ user.email = raw_email
+ end
+
+ specify do
+ subject.execute
+
+ expect(user.user_canonical_email).not_to be_nil
+ expect(user.user_canonical_email.canonical_email).to eq expected_result
+ end
+ end
+ end
+
+ context 'when the user has an existing canonical email' do
+ it 'updates the user canonical email record' do
+ user.user_canonical_email = build(:user_canonical_email, canonical_email: other_email)
+ user.email = "us.er+123@includeddomain.com"
+
+ subject.execute
+
+ expect(user.user_canonical_email.canonical_email).to eq "user@includeddomain.com"
+ end
+ end
+ end
+
+ context 'when the email domain is not included' do
+ it 'returns nil' do
+ user.email = "u.s.er+343@excludeddomain.com"
+
+ subject.execute
+
+ expect(user.user_canonical_email).to be_nil
+ end
+
+ it 'destroys any existing UserCanonicalEmail record' do
+ user.email = "u.s.er+343@excludeddomain.com"
+ user.user_canonical_email = build(:user_canonical_email, canonical_email: other_email)
+ expect(user.user_canonical_email).to receive(:delete)
+
+ subject.execute
+ end
+ end
+
+ context 'when the user email is not processable' do
+ [nil, 'nonsense'].each do |invalid_address|
+ before do
+ user.email = invalid_address
+ end
+
+ specify do
+ subject.execute
+
+ expect(user.user_canonical_email).to be_nil
+ end
+
+ it 'preserves any existing record' do
+ user.email = nil
+ user.user_canonical_email = build(:user_canonical_email, canonical_email: other_email)
+
+ subject.execute
+
+ expect(user.user_canonical_email.canonical_email).to eq other_email
+ end
+ end
+ end
+ end
+end
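
Taken together, the table and the negative cases above describe a pure normalization rule: included domains keep a canonical form with dots removed and any "+suffix" dropped, while excluded or unparseable addresses yield no canonical email at all. A minimal sketch of that rule, assuming nothing beyond what the examples show (the helper name and argument order are invented for illustration):

```ruby
# Hypothetical helper, not the committed service.
def canonicalize_email(raw_email, included_domains_pattern)
  local, _, domain = raw_email.to_s.partition('@')
  return nil if local.empty? || domain.empty?                         # nil or 'nonsense'
  return nil unless included_domains_pattern.any? { |re| domain =~ re } # excluded domain

  "#{local.delete('.').split('+').first}@#{domain}"
end

canonicalize_email('us.er+123@includeddomain.com', [/includeddomain/])  # => "user@includeddomain.com"
canonicalize_email('u.s.er+343@excludeddomain.com', [/includeddomain/]) # => nil
canonicalize_email('nonsense', [/includeddomain/])                      # => nil
```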
diff --git a/spec/services/users/update_service_spec.rb b/spec/services/users/update_service_spec.rb
index 24738a79045..bd54ca97431 100644
--- a/spec/services/users/update_service_spec.rb
+++ b/spec/services/users/update_service_spec.rb
@@ -71,6 +71,32 @@ describe Users::UpdateService do
expect(user.job_title).to eq('Backend Engineer')
end
+ context 'updating canonical email' do
+ context 'if email was changed' do
+ subject do
+ update_user(user, email: 'user+extrastuff@example.com')
+ end
+
+      it 'calls the UpdateCanonicalEmailService' do
+ expect_next_instance_of(Users::UpdateCanonicalEmailService) do |service|
+ expect(service).to receive(:execute)
+ end
+
+ subject
+ end
+ end
+
+ context 'if email was NOT changed' do
+ subject do
+ update_user(user, job_title: 'supreme leader of the universe')
+ end
+
+      it 'skips the UpdateCanonicalEmailService call' do
+ expect { subject }.not_to change { user.user_canonical_email }
+ end
+ end
+ end
+
def update_user(user, opts)
described_class.new(user, opts.merge(user: user)).execute
end
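
The two contexts above only constrain when the canonical email is recomputed. A sketch of that guard (the hook name is an assumption; `email_changed?` is standard ActiveModel dirty tracking):

```ruby
# Hypothetical hook, not the committed code: recompute the canonical email only when
# this update actually changed the email attribute, which is why the job_title-only
# update above leaves user.user_canonical_email untouched.
def update_canonical_email(user)
  return unless user.email_changed?

  Users::UpdateCanonicalEmailService.new(user: user).execute
end
```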
diff --git a/spec/workers/cluster_update_app_worker_spec.rb b/spec/workers/cluster_update_app_worker_spec.rb
new file mode 100644
index 00000000000..e540ede4bc0
--- /dev/null
+++ b/spec/workers/cluster_update_app_worker_spec.rb
@@ -0,0 +1,98 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe ClusterUpdateAppWorker do
+ include ExclusiveLeaseHelpers
+
+ let_it_be(:project) { create(:project) }
+
+ let(:prometheus_update_service) { spy }
+
+ subject { described_class.new }
+
+ around do |example|
+ Timecop.freeze(Time.now) { example.run }
+ end
+
+ before do
+ allow(::Clusters::Applications::PrometheusUpdateService).to receive(:new).and_return(prometheus_update_service)
+ end
+
+ describe '#perform' do
+    context 'when the application last_update_started_at is later than the time the job was scheduled at' do
+ it 'does nothing' do
+ application = create(:clusters_applications_prometheus, :updated, last_update_started_at: Time.now)
+
+ expect(prometheus_update_service).not_to receive(:execute)
+
+ expect(subject.perform(application.name, application.id, project.id, Time.now - 5.minutes)).to be_nil
+ end
+ end
+
+ context 'when another worker is already running' do
+ it 'returns nil' do
+ application = create(:clusters_applications_prometheus, :updating)
+
+ expect(subject.perform(application.name, application.id, project.id, Time.now)).to be_nil
+ end
+ end
+
+ it 'executes PrometheusUpdateService' do
+ application = create(:clusters_applications_prometheus, :installed)
+
+ expect(prometheus_update_service).to receive(:execute)
+
+ subject.perform(application.name, application.id, project.id, Time.now)
+ end
+
+ context 'with exclusive lease' do
+ let(:application) { create(:clusters_applications_prometheus, :installed) }
+ let(:lease_key) { "#{described_class.name.underscore}-#{application.id}" }
+
+ before do
+ allow(Gitlab::ExclusiveLease).to receive(:new)
+ stub_exclusive_lease_taken(lease_key)
+ end
+
+      it 'does not allow the same app to be updated concurrently by the same project' do
+ expect(Clusters::Applications::PrometheusUpdateService).not_to receive(:new)
+
+ subject.perform(application.name, application.id, project.id, Time.now)
+ end
+
+      it 'does not allow the same app to be updated concurrently by a different project' do
+ project1 = create(:project)
+
+ expect(Clusters::Applications::PrometheusUpdateService).not_to receive(:new)
+
+ subject.perform(application.name, application.id, project1.id, Time.now)
+ end
+
+      it 'allows a different app to be updated concurrently by the same project' do
+ application2 = create(:clusters_applications_prometheus, :installed)
+ lease_key2 = "#{described_class.name.underscore}-#{application2.id}"
+
+ stub_exclusive_lease(lease_key2)
+
+ expect(Clusters::Applications::PrometheusUpdateService).to receive(:new)
+ .with(application2, project)
+
+ subject.perform(application2.name, application2.id, project.id, Time.now)
+ end
+
+      it 'allows a different app to be updated by a different project' do
+ application2 = create(:clusters_applications_prometheus, :installed)
+ lease_key2 = "#{described_class.name.underscore}-#{application2.id}"
+ project2 = create(:project)
+
+ stub_exclusive_lease(lease_key2)
+
+ expect(Clusters::Applications::PrometheusUpdateService).to receive(:new)
+ .with(application2, project2)
+
+ subject.perform(application2.name, application2.id, project2.id, Time.now)
+ end
+ end
+ end
+end
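
The worker examples above exercise two independent guards: the job is a no-op when the application started an update after the job was scheduled, and an exclusive lease keyed on the application id keeps the same application from being updated concurrently regardless of project. A hedged sketch of both, with collaborators injected so the snippet stands alone (the lease timeout, the class name, and the simplified signature that takes records instead of ids are assumptions; the lease key format and the staleness check mirror the spec):

```ruby
# Hypothetical sketch only -- not the worker added by this commit.
class ClusterUpdateAppWorkerSketch
  LEASE_TIMEOUT = 3600 # seconds; assumed value

  def initialize(lease_class:, update_service_class:)
    @lease_class = lease_class                   # e.g. Gitlab::ExclusiveLease
    @update_service_class = update_service_class # e.g. Clusters::Applications::PrometheusUpdateService
  end

  def perform(application, project, scheduled_time)
    # Guard 1: a newer update already started after this job was enqueued.
    return if application.last_update_started_at &&
              application.last_update_started_at > scheduled_time

    # Guard 2: one in-flight update per application, regardless of project,
    # enforced through an exclusive lease keyed on the application id.
    lease_key = "cluster_update_app_worker-#{application.id}"
    return unless @lease_class.new(lease_key, timeout: LEASE_TIMEOUT).try_obtain

    @update_service_class.new(application, project).execute
  end
end
```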
diff --git a/spec/workers/cluster_wait_for_app_update_worker_spec.rb b/spec/workers/cluster_wait_for_app_update_worker_spec.rb
new file mode 100644
index 00000000000..f1206bd85cb
--- /dev/null
+++ b/spec/workers/cluster_wait_for_app_update_worker_spec.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe ClusterWaitForAppUpdateWorker do
+ let(:check_upgrade_progress_service) { spy }
+
+ before do
+ allow(::Clusters::Applications::CheckUpgradeProgressService).to receive(:new).and_return(check_upgrade_progress_service)
+ end
+
+ it 'runs CheckUpgradeProgressService when application is found' do
+ application = create(:clusters_applications_prometheus)
+
+ expect(check_upgrade_progress_service).to receive(:execute)
+
+ subject.perform(application.name, application.id)
+ end
+
+ it 'does not run CheckUpgradeProgressService when application is not found' do
+ expect(check_upgrade_progress_service).not_to receive(:execute)
+
+ expect do
+ subject.perform("prometheus", -1)
+ end.to raise_error(ActiveRecord::RecordNotFound)
+ end
+end
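
For completeness, the behavior the last two examples describe is a plain lookup-then-delegate step, sketched here under the assumption that a missing record is left to raise so the job framework's retry machinery can handle it (the finder and class name are invented for illustration):

```ruby
# Hypothetical sketch only -- not the worker added by this commit.
class ClusterWaitForAppUpdateWorkerSketch
  def initialize(finder:, progress_service_class:)
    @finder = finder                                  # resolves (name, id) to a record
    @progress_service_class = progress_service_class # e.g. Clusters::Applications::CheckUpgradeProgressService
  end

  def perform(app_name, app_id)
    application = @finder.call(app_name, app_id) # a missing record is allowed to raise
    @progress_service_class.new(application).execute
  end
end
```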