path: root/app/models/clusters
author    GitLab Bot <gitlab-bot@gitlab.com>  2019-11-19 22:11:55 +0000
committer GitLab Bot <gitlab-bot@gitlab.com>  2019-11-19 22:11:55 +0000
commit    5a8431feceba47fd8e1804d9aa1b1730606b71d5 (patch)
tree      e5df8e0ceee60f4af8093f5c4c2f934b8abced05 /app/models/clusters
parent    4d477238500c347c6553d335d920bedfc5a46869 (diff)
download  gitlab-ce-5a8431feceba47fd8e1804d9aa1b1730606b71d5.tar.gz
Add latest changes from gitlab-org/gitlab@12-5-stable-ee
Diffstat (limited to 'app/models/clusters')
-rw-r--r--  app/models/clusters/applications/cert_manager.rb  |   2
-rw-r--r--  app/models/clusters/applications/crossplane.rb    |  60
-rw-r--r--  app/models/clusters/applications/elastic_stack.rb | 108
-rw-r--r--  app/models/clusters/applications/ingress.rb       |  73
-rw-r--r--  app/models/clusters/applications/runner.rb        |   2
-rw-r--r--  app/models/clusters/cluster.rb                    |  67
-rw-r--r--  app/models/clusters/clusters_hierarchy.rb         |  41
-rw-r--r--  app/models/clusters/concerns/application_core.rb  |  18
-rw-r--r--  app/models/clusters/instance.rb                   |   4
-rw-r--r--  app/models/clusters/providers/aws.rb              |  26
-rw-r--r--  app/models/clusters/providers/gcp.rb              |   4
11 files changed, 385 insertions, 20 deletions
diff --git a/app/models/clusters/applications/cert_manager.rb b/app/models/clusters/applications/cert_manager.rb
index 18cbf827a67..7ba04d1a2de 100644
--- a/app/models/clusters/applications/cert_manager.rb
+++ b/app/models/clusters/applications/cert_manager.rb
@@ -65,7 +65,7 @@ module Clusters
end
def retry_command(command)
- "for i in $(seq 1 30); do #{command} && s=0 && break || s=$?; sleep 1s; echo \"Retrying ($i)...\"; done; (exit $s)"
+ "for i in $(seq 1 90); do #{command} && s=0 && break || s=$?; sleep 1s; echo \"Retrying ($i)...\"; done; (exit $s)"
end
def post_delete_script
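The change above triples the retry budget from 30 to 90 one-second attempts. A minimal sketch of the script the wrapper emits, assuming a Rails console in the GitLab app and a hypothetical kubectl command (`send` is used because the method's visibility is not shown in this hunk):

    cert_manager = Clusters::Applications::CertManager.new
    cert_manager.send(:retry_command, 'kubectl get crd')
    # => "for i in $(seq 1 90); do kubectl get crd && s=0 && break || s=$?;
    #     sleep 1s; echo \"Retrying ($i)...\"; done; (exit $s)"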
diff --git a/app/models/clusters/applications/crossplane.rb b/app/models/clusters/applications/crossplane.rb
new file mode 100644
index 00000000000..36246b26066
--- /dev/null
+++ b/app/models/clusters/applications/crossplane.rb
@@ -0,0 +1,60 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Applications
+ class Crossplane < ApplicationRecord
+ VERSION = '0.4.1'
+
+ self.table_name = 'clusters_applications_crossplane'
+
+ include ::Clusters::Concerns::ApplicationCore
+ include ::Clusters::Concerns::ApplicationStatus
+ include ::Clusters::Concerns::ApplicationVersion
+ include ::Clusters::Concerns::ApplicationData
+
+ default_value_for :version, VERSION
+
+ default_value_for :stack do |crossplane|
+ ''
+ end
+
+ validates :stack, presence: true
+
+ def chart
+ 'crossplane/crossplane'
+ end
+
+ def repository
+ 'https://charts.crossplane.io/alpha'
+ end
+
+ def install_command
+ Gitlab::Kubernetes::Helm::InstallCommand.new(
+ name: 'crossplane',
+ repository: repository,
+ version: VERSION,
+ rbac: cluster.platform_kubernetes_rbac?,
+ chart: chart,
+ files: files
+ )
+ end
+
+ def values
+ crossplane_values.to_yaml
+ end
+
+ private
+
+ def crossplane_values
+ {
+ "clusterStacks" => {
+ self.stack => {
+ "deploy" => true,
+ "version" => "alpha"
+ }
+ }
+ }
+ end
+ end
+ end
+end
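A sketch of the rendered Helm values for a hypothetical `gcp` stack; `values` serializes `crossplane_values` straight to YAML:

    crossplane = Clusters::Applications::Crossplane.new(stack: 'gcp')
    puts crossplane.values
    # ---
    # clusterStacks:
    #   gcp:
    #     deploy: true
    #     version: alpha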
diff --git a/app/models/clusters/applications/elastic_stack.rb b/app/models/clusters/applications/elastic_stack.rb
new file mode 100644
index 00000000000..8589f8c00cb
--- /dev/null
+++ b/app/models/clusters/applications/elastic_stack.rb
@@ -0,0 +1,108 @@
+# frozen_string_literal: true
+
+module Clusters
+ module Applications
+ class ElasticStack < ApplicationRecord
+ VERSION = '1.8.0'
+
+ ELASTICSEARCH_PORT = 9200
+
+ self.table_name = 'clusters_applications_elastic_stacks'
+
+ include ::Clusters::Concerns::ApplicationCore
+ include ::Clusters::Concerns::ApplicationStatus
+ include ::Clusters::Concerns::ApplicationVersion
+ include ::Clusters::Concerns::ApplicationData
+ include ::Gitlab::Utils::StrongMemoize
+
+ default_value_for :version, VERSION
+
+ def set_initial_status
+ return unless not_installable?
+ return unless cluster&.application_ingress_available?
+
+ ingress = cluster.application_ingress
+ self.status = status_states[:installable] if ingress.external_ip_or_hostname?
+ end
+
+ def chart
+ 'stable/elastic-stack'
+ end
+
+ def values
+ content_values.to_yaml
+ end
+
+ def install_command
+ Gitlab::Kubernetes::Helm::InstallCommand.new(
+ name: 'elastic-stack',
+ version: VERSION,
+ rbac: cluster.platform_kubernetes_rbac?,
+ chart: chart,
+ files: files
+ )
+ end
+
+ def uninstall_command
+ Gitlab::Kubernetes::Helm::DeleteCommand.new(
+ name: 'elastic-stack',
+ rbac: cluster.platform_kubernetes_rbac?,
+ files: files,
+ postdelete: post_delete_script
+ )
+ end
+
+ def elasticsearch_client
+ strong_memoize(:elasticsearch_client) do
+ next unless kube_client
+
+ proxy_url = kube_client.proxy_url('service', 'elastic-stack-elasticsearch-client', ::Clusters::Applications::ElasticStack::ELASTICSEARCH_PORT, Gitlab::Kubernetes::Helm::NAMESPACE)
+
+ Elasticsearch::Client.new(url: proxy_url) do |faraday|
+ # ensure headers containing auth data are appended to the original client options
+ faraday.headers.merge!(kube_client.headers)
+ # ensure TLS certs are properly verified
+ faraday.ssl[:verify] = kube_client.ssl_options[:verify_ssl]
+ faraday.ssl[:cert_store] = kube_client.ssl_options[:cert_store]
+ end
+
+ rescue Kubeclient::HttpError => error
+ # If the user has misconfigured the cluster or removed the cluster it depends on,
+ # `proxy_url` can raise an exception because GitLab cannot communicate with the cluster.
+ # Downstream callers check for a nil client, which behaves the same as an empty state.
+ log_exception(error, :failed_to_create_elasticsearch_client)
+ end
+ end
+
+ private
+
+ def specification
+ {
+ "kibana" => {
+ "ingress" => {
+ "hosts" => [kibana_hostname],
+ "tls" => [{
+ "hosts" => [kibana_hostname],
+ "secretName" => "kibana-cert"
+ }]
+ }
+ }
+ }
+ end
+
+ def content_values
+ YAML.load_file(chart_values_file).deep_merge!(specification)
+ end
+
+ def post_delete_script
+ [
+ Gitlab::Kubernetes::KubectlCmd.delete("pvc", "--selector", "release=elastic-stack")
+ ].compact
+ end
+
+ def kube_client
+ cluster&.kubeclient&.core_client
+ end
+ end
+ end
+end
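A minimal sketch of the nil-check pattern the `rescue` comment refers to: callers treat a nil client as an empty result set. The index name and query are illustrative only:

    client = cluster.application_elastic_stack&.elasticsearch_client
    if client.nil?
      []  # cluster unreachable or misconfigured: behave as an empty state
    else
      client.search(index: 'filebeat-*', body: { query: { match_all: {} } })
    end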
diff --git a/app/models/clusters/applications/ingress.rb b/app/models/clusters/applications/ingress.rb
index 885e4ff7197..d140649af3c 100644
--- a/app/models/clusters/applications/ingress.rb
+++ b/app/models/clusters/applications/ingress.rb
@@ -21,6 +21,7 @@ module Clusters
}
FETCH_IP_ADDRESS_DELAY = 30.seconds
+ MODSEC_SIDECAR_INITIAL_DELAY_SECONDS = 10
state_machine :status do
after_transition any => [:installed] do |application|
@@ -40,7 +41,7 @@ module Clusters
end
def allowed_to_uninstall?
- external_ip_or_hostname? && application_jupyter_nil_or_installable?
+ external_ip_or_hostname? && application_jupyter_nil_or_installable? && application_elastic_stack_nil_or_installable?
end
def install_command
@@ -78,12 +79,74 @@ module Clusters
"controller" => {
"config" => {
"enable-modsecurity" => "true",
- "enable-owasp-modsecurity-crs" => "true"
- }
+ "enable-owasp-modsecurity-crs" => "true",
+ "modsecurity.conf" => modsecurity_config_content
+ },
+ "extraContainers" => [
+ {
+ "name" => "modsecurity-log",
+ "image" => "busybox",
+ "args" => [
+ "/bin/sh",
+ "-c",
+ "tail -f /var/log/modsec/audit.log"
+ ],
+ "volumeMounts" => [
+ {
+ "name" => "modsecurity-log-volume",
+ "mountPath" => "/var/log/modsec",
+ "readOnly" => true
+ }
+ ],
+ "startupProbe" => {
+ "exec" => {
+ "command" => ["ls", "/var/log/modsec"]
+ },
+ "initialDelaySeconds" => MODSEC_SIDECAR_INITIAL_DELAY_SECONDS
+ }
+ }
+ ],
+ "extraVolumeMounts" => [
+ {
+ "name" => "modsecurity-template-volume",
+ "mountPath" => "/etc/nginx/modsecurity/modsecurity.conf",
+ "subPath" => "modsecurity.conf"
+ },
+ {
+ "name" => "modsecurity-log-volume",
+ "mountPath" => "/var/log/modsec"
+ }
+ ],
+ "extraVolumes" => [
+ {
+ "name" => "modsecurity-template-volume",
+ "configMap" => {
+ "name" => "ingress-nginx-ingress-controller",
+ "items" => [
+ {
+ "key" => "modsecurity.conf",
+ "path" => "modsecurity.conf"
+ }
+ ]
+ }
+ },
+ {
+ "name" => "modsecurity-log-volume",
+ "emptyDir" => {}
+ }
+ ]
}
}
end
+ def modsecurity_config_content
+ File.read(modsecurity_config_file_path)
+ end
+
+ def modsecurity_config_file_path
+ Rails.root.join('vendor', 'ingress', 'modsecurity.conf')
+ end
+
def content_values
YAML.load_file(chart_values_file).deep_merge!(specification)
end
@@ -91,6 +154,10 @@ module Clusters
def application_jupyter_nil_or_installable?
cluster.application_jupyter.nil? || cluster.application_jupyter&.installable?
end
+
+ def application_elastic_stack_nil_or_installable?
+ cluster.application_elastic_stack.nil? || cluster.application_elastic_stack&.installable?
+ end
end
end
end
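A sketch of the effect of the new specification: `content_values` deep-merges it into the chart's bundled values file, so the rendered values gain the ModSecurity log sidecar. `send` is used since the method's visibility is not shown in this hunk:

    ingress = cluster.application_ingress
    ingress.send(:content_values).dig('controller', 'extraContainers', 0, 'name')
    # => "modsecurity-log"
    ingress.send(:content_values).dig('controller', 'config', 'enable-modsecurity')
    # => "true"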
diff --git a/app/models/clusters/applications/runner.rb b/app/models/clusters/applications/runner.rb
index 954046c143b..37ba8a7c97e 100644
--- a/app/models/clusters/applications/runner.rb
+++ b/app/models/clusters/applications/runner.rb
@@ -3,7 +3,7 @@
module Clusters
module Applications
class Runner < ApplicationRecord
- VERSION = '0.9.0'
+ VERSION = '0.10.1'
self.table_name = 'clusters_applications_runners'
diff --git a/app/models/clusters/cluster.rb b/app/models/clusters/cluster.rb
index d6f5d7c3f93..f522f3f2fdb 100644
--- a/app/models/clusters/cluster.rb
+++ b/app/models/clusters/cluster.rb
@@ -6,20 +6,21 @@ module Clusters
include Gitlab::Utils::StrongMemoize
include FromUnion
include ReactiveCaching
+ include AfterCommitQueue
self.table_name = 'clusters'
- PROJECT_ONLY_APPLICATIONS = {
- }.freeze
APPLICATIONS = {
Applications::Helm.application_name => Applications::Helm,
Applications::Ingress.application_name => Applications::Ingress,
Applications::CertManager.application_name => Applications::CertManager,
+ Applications::Crossplane.application_name => Applications::Crossplane,
Applications::Prometheus.application_name => Applications::Prometheus,
Applications::Runner.application_name => Applications::Runner,
Applications::Jupyter.application_name => Applications::Jupyter,
- Applications::Knative.application_name => Applications::Knative
- }.merge(PROJECT_ONLY_APPLICATIONS).freeze
+ Applications::Knative.application_name => Applications::Knative,
+ Applications::ElasticStack.application_name => Applications::ElasticStack
+ }.freeze
DEFAULT_ENVIRONMENT = '*'
KUBE_INGRESS_BASE_DOMAIN = 'KUBE_INGRESS_BASE_DOMAIN'
@@ -47,14 +48,17 @@ module Clusters
has_one_cluster_application :helm
has_one_cluster_application :ingress
has_one_cluster_application :cert_manager
+ has_one_cluster_application :crossplane
has_one_cluster_application :prometheus
has_one_cluster_application :runner
has_one_cluster_application :jupyter
has_one_cluster_application :knative
+ has_one_cluster_application :elastic_stack
has_many :kubernetes_namespaces
accepts_nested_attributes_for :provider_gcp, update_only: true
+ accepts_nested_attributes_for :provider_aws, update_only: true
accepts_nested_attributes_for :platform_kubernetes, update_only: true
validates :name, cluster_name: true
@@ -72,6 +76,7 @@ module Clusters
delegate :status, to: :provider, allow_nil: true
delegate :status_reason, to: :provider, allow_nil: true
delegate :on_creation?, to: :provider, allow_nil: true
+ delegate :knative_pre_installed?, to: :provider, allow_nil: true
delegate :active?, to: :platform_kubernetes, prefix: true, allow_nil: true
delegate :rbac?, to: :platform_kubernetes, prefix: true, allow_nil: true
@@ -115,6 +120,8 @@ module Clusters
scope :default_environment, -> { where(environment_scope: DEFAULT_ENVIRONMENT) }
+ scope :for_project_namespace, -> (namespace_id) { joins(:projects).where(projects: { namespace_id: namespace_id }) }
+
def self.ancestor_clusters_for_clusterable(clusterable, hierarchy_order: :asc)
return [] if clusterable.is_a?(Instance)
@@ -124,7 +131,55 @@ module Clusters
hierarchy_groups.flat_map(&:clusters) + Instance.new.clusters
end
+ state_machine :cleanup_status, initial: :cleanup_not_started do
+ state :cleanup_not_started, value: 1
+ state :cleanup_uninstalling_applications, value: 2
+ state :cleanup_removing_project_namespaces, value: 3
+ state :cleanup_removing_service_account, value: 4
+ state :cleanup_errored, value: 5
+
+ event :start_cleanup do |cluster|
+ transition [:cleanup_not_started, :cleanup_errored] => :cleanup_uninstalling_applications
+ end
+
+ event :continue_cleanup do
+ transition(
+ cleanup_uninstalling_applications: :cleanup_removing_project_namespaces,
+ cleanup_removing_project_namespaces: :cleanup_removing_service_account)
+ end
+
+ event :make_cleanup_errored do
+ transition any => :cleanup_errored
+ end
+
+ before_transition any => [:cleanup_errored] do |cluster, transition|
+ status_reason = transition.args.first
+ cluster.cleanup_status_reason = status_reason if status_reason
+ end
+
+ after_transition [:cleanup_not_started, :cleanup_errored] => :cleanup_uninstalling_applications do |cluster|
+ cluster.run_after_commit do
+ Clusters::Cleanup::AppWorker.perform_async(cluster.id)
+ end
+ end
+
+ after_transition cleanup_uninstalling_applications: :cleanup_removing_project_namespaces do |cluster|
+ cluster.run_after_commit do
+ Clusters::Cleanup::ProjectNamespaceWorker.perform_async(cluster.id)
+ end
+ end
+
+ after_transition cleanup_removing_project_namespaces: :cleanup_removing_service_account do |cluster|
+ cluster.run_after_commit do
+ Clusters::Cleanup::ServiceAccountWorker.perform_async(cluster.id)
+ end
+ end
+ end
+
def status_name
+ return cleanup_status_name if cleanup_errored?
+ return :cleanup_ongoing unless cleanup_not_started?
+
provider&.status_name || connection_status.presence || :created
end
@@ -207,10 +262,6 @@ module Clusters
end
end
- def knative_pre_installed?
- provider&.knative_pre_installed?
- end
-
private
def unique_management_project_environment_scope
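A sketch of the cleanup flow the new state machine drives. The bang event methods are generated by the state_machine gem, and each transition enqueues its worker only after the surrounding transaction commits (via AfterCommitQueue):

    cluster = Clusters::Cluster.find(some_id)  # some_id is a placeholder
    cluster.start_cleanup!      # async: Clusters::Cleanup::AppWorker
    cluster.status_name         # => :cleanup_ongoing
    cluster.continue_cleanup!   # async: Clusters::Cleanup::ProjectNamespaceWorker
    cluster.continue_cleanup!   # async: Clusters::Cleanup::ServiceAccountWorker
    cluster.make_cleanup_errored!('Helm uninstall timed out')  # sets cleanup_status_reason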
diff --git a/app/models/clusters/clusters_hierarchy.rb b/app/models/clusters/clusters_hierarchy.rb
index a906eb2888b..c9c18d8c96a 100644
--- a/app/models/clusters/clusters_hierarchy.rb
+++ b/app/models/clusters/clusters_hierarchy.rb
@@ -20,7 +20,7 @@ module Clusters
.with
.recursive(cte.to_arel)
.from(cte_alias)
- .order(DEPTH_COLUMN => :asc)
+ .order(depth_order_clause)
end
private
@@ -40,7 +40,7 @@ module Clusters
end
if clusterable.is_a?(::Project) && include_management_project
- cte << management_clusters_query
+ cte << same_namespace_management_clusters_query
end
cte << base_query
@@ -49,13 +49,42 @@ module Clusters
cte
end
+ # Returns project-level clusters where the project is the management project
+ # for the cluster. The management project has to be in the same namespace /
+ # group as the cluster's project.
+ #
+ # Support for management project in sub-groups is planned in
+ # https://gitlab.com/gitlab-org/gitlab/issues/34650
+ #
+ # NB: group_parent_id is unused, but we still need to match the same number of
+ # columns as other queries in the CTE.
+ def same_namespace_management_clusters_query
+ clusterable.management_clusters
+ .project_type
+ .select([clusters_star, 'NULL AS group_parent_id', "0 AS #{DEPTH_COLUMN}"])
+ .for_project_namespace(clusterable.namespace_id)
+ end
+
# Management clusters should be first in the hierarchy so we use 0 for the
# depth column.
#
- # group_parent_id is un-used but we still need to match the same number of
- # columns as other queries in the CTE.
- def management_clusters_query
- clusterable.management_clusters.select([clusters_star, 'NULL AS group_parent_id', "0 AS #{DEPTH_COLUMN}"])
+ # Only applicable when the clusterable is a project (in particular when
+ # requesting project.deployment_platform).
+ def depth_order_clause
+ return { DEPTH_COLUMN => :asc } unless clusterable.is_a?(::Project) && include_management_project
+
+ order = <<~SQL
+ (CASE clusters.management_project_id
+ WHEN :project_id THEN 0
+ ELSE #{DEPTH_COLUMN}
+ END) ASC
+ SQL
+
+ values = {
+ project_id: clusterable.id
+ }
+
+ model.sanitize_sql_array([Arel.sql(order), values])
end
def group_clusters_base_query
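For a project clusterable with `include_management_project`, the clause above renders roughly as follows (project id 42 is hypothetical; DEPTH_COLUMN is shown symbolically since its value is defined outside this hunk):

    # ORDER BY
    #   (CASE clusters.management_project_id
    #    WHEN 42 THEN 0
    #    ELSE <DEPTH_COLUMN>
    #    END) ASC

so management clusters sort ahead of ancestor clusters regardless of their depth.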
diff --git a/app/models/clusters/concerns/application_core.rb b/app/models/clusters/concerns/application_core.rb
index 979cf0645f5..21b98534808 100644
--- a/app/models/clusters/concerns/application_core.rb
+++ b/app/models/clusters/concerns/application_core.rb
@@ -60,6 +60,24 @@ module Clusters
# Override if your application needs any action after
# being uninstalled by Helm
end
+
+ def logger
+ @logger ||= Gitlab::Kubernetes::Logger.build
+ end
+
+ def log_exception(error, event)
+ logger.error({
+ exception: error.class.name,
+ status_code: error.error_code,
+ cluster_id: cluster&.id,
+ application_id: id,
+ class_name: self.class.name,
+ event: event,
+ message: error.message
+ })
+
+ Gitlab::Sentry.track_acceptable_exception(error, extra: { cluster_id: cluster&.id, application_id: id })
+ end
end
end
end
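A sketch of how an application model including this concern uses the new helper, mirroring the `rescue` in elastic_stack.rb above (the event symbol is illustrative):

    begin
      kube_client.proxy_url('service', 'some-service', 80, Gitlab::Kubernetes::Helm::NAMESPACE)
    rescue Kubeclient::HttpError => error
      log_exception(error, :failed_to_contact_cluster)
    end

`log_exception` assumes the error responds to `error_code`, which `Kubeclient::HttpError` does; it writes a structured log line and also reports to Sentry.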
diff --git a/app/models/clusters/instance.rb b/app/models/clusters/instance.rb
index f21dbdf7f26..8c9d9ab9ab1 100644
--- a/app/models/clusters/instance.rb
+++ b/app/models/clusters/instance.rb
@@ -9,5 +9,9 @@ module Clusters
def feature_available?(feature)
::Feature.enabled?(feature, default_enabled: true)
end
+
+ def flipper_id
+ self.class.to_s
+ end
end
end
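`flipper_id` lets the instance-level clusterable act as a feature-flag actor. A sketch (the flag name is hypothetical):

    instance = Clusters::Instance.new
    instance.flipper_id                       # => "Clusters::Instance"
    ::Feature.enabled?(:some_flag, instance)  # actor-scoped check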
diff --git a/app/models/clusters/providers/aws.rb b/app/models/clusters/providers/aws.rb
index ae4156896bc..78eb75ddcc0 100644
--- a/app/models/clusters/providers/aws.rb
+++ b/app/models/clusters/providers/aws.rb
@@ -3,12 +3,12 @@
module Clusters
module Providers
class Aws < ApplicationRecord
+ include Gitlab::Utils::StrongMemoize
include Clusters::Concerns::ProviderStatus
self.table_name = 'cluster_providers_aws'
belongs_to :cluster, inverse_of: :provider_aws, class_name: 'Clusters::Cluster'
- belongs_to :created_by_user, class_name: 'User'
default_value_for :region, 'us-east-1'
default_value_for :num_nodes, 3
@@ -42,6 +42,30 @@ module Clusters
session_token: nil
)
end
+
+ def api_client
+ strong_memoize(:api_client) do
+ ::Aws::CloudFormation::Client.new(credentials: credentials, region: region)
+ end
+ end
+
+ def credentials
+ strong_memoize(:credentials) do
+ ::Aws::Credentials.new(access_key_id, secret_access_key, session_token)
+ end
+ end
+
+ def has_rbac_enabled?
+ true
+ end
+
+ def knative_pre_installed?
+ false
+ end
+
+ def created_by_user
+ cluster.user
+ end
end
end
end
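A sketch of the new AWS provider API surface, assuming valid stored credentials; `describe_stacks` is a standard aws-sdk-cloudformation call and the stack name is hypothetical:

    provider = cluster.provider_aws
    provider.credentials  # => ::Aws::Credentials (memoized)
    provider.api_client   # => ::Aws::CloudFormation::Client scoped to provider.region
    provider.api_client.describe_stacks(stack_name: 'gitlab-eks-cluster')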
diff --git a/app/models/clusters/providers/gcp.rb b/app/models/clusters/providers/gcp.rb
index f871674676f..2ca7d0249dc 100644
--- a/app/models/clusters/providers/gcp.rb
+++ b/app/models/clusters/providers/gcp.rb
@@ -54,6 +54,10 @@ module Clusters
assign_attributes(operation_id: operation_id)
end
+ def has_rbac_enabled?
+ !legacy_abac
+ end
+
def knative_pre_installed?
cloud_run?
end
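With both providers now answering the same questions, callers can delegate without caring which cloud backs the cluster. A sketch of the shared provider contract:

    provider = cluster.provider       # GCP or AWS
    provider.has_rbac_enabled?        # GCP: !legacy_abac; AWS: always true
    provider.knative_pre_installed?   # GCP: cloud_run?;  AWS: always false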