diff options
author | GitLab Bot <gitlab-bot@gitlab.com> | 2020-12-17 11:59:07 +0000 |
---|---|---|
committer | GitLab Bot <gitlab-bot@gitlab.com> | 2020-12-17 11:59:07 +0000 |
commit | 8b573c94895dc0ac0e1d9d59cf3e8745e8b539ca (patch) | |
tree | 544930fb309b30317ae9797a9683768705d664c4 /app/models/clusters | |
parent | 4b1de649d0168371549608993deac953eb692019 (diff) | |
download | gitlab-ce-8b573c94895dc0ac0e1d9d59cf3e8745e8b539ca.tar.gz |
Add latest changes from gitlab-org/gitlab@13-7-stable-ee (tag: v13.7.0-rc42)
Diffstat (limited to 'app/models/clusters')
-rw-r--r-- | app/models/clusters/agent.rb | 4 | ||||
-rw-r--r-- | app/models/clusters/applications/helm.rb | 35 | ||||
-rw-r--r-- | app/models/clusters/applications/runner.rb | 2 | ||||
-rw-r--r-- | app/models/clusters/cluster.rb | 4 | ||||
-rw-r--r-- | app/models/clusters/concerns/application_core.rb | 8 | ||||
-rw-r--r-- | app/models/clusters/concerns/application_data.rb | 8 | ||||
-rw-r--r-- | app/models/clusters/platforms/kubernetes.rb | 66 |
7 files changed, 82 insertions, 45 deletions
diff --git a/app/models/clusters/agent.rb b/app/models/clusters/agent.rb index 5feb3b0a1e6..c58a3bab1a9 100644 --- a/app/models/clusters/agent.rb +++ b/app/models/clusters/agent.rb @@ -19,5 +19,9 @@ module Clusters with: Gitlab::Regex.cluster_agent_name_regex, message: Gitlab::Regex.cluster_agent_name_regex_message } + + def has_access_to?(requested_project) + requested_project == project + end end end diff --git a/app/models/clusters/applications/helm.rb b/app/models/clusters/applications/helm.rb index d1d6defb713..6f4b273a2c8 100644 --- a/app/models/clusters/applications/helm.rb +++ b/app/models/clusters/applications/helm.rb @@ -4,8 +4,8 @@ require 'openssl' module Clusters module Applications - # DEPRECATED: This model represents the Helm 2 Tiller server, and is no longer being actively used. - # It is being kept around for a potential cleanup of the unused Tiller server. + # DEPRECATED: This model represents the Helm 2 Tiller server. + # It is being kept around to enable the cleanup of the unused Tiller server. class Helm < ApplicationRecord self.table_name = 'clusters_applications_helm' @@ -27,29 +27,11 @@ module Clusters end def set_initial_status - return unless not_installable? - - self.status = status_states[:installable] if cluster&.platform_kubernetes_active? - end - - # It can only be uninstalled if there are no other applications installed - # or with intermitent installation statuses in the database. - def allowed_to_uninstall? - strong_memoize(:allowed_to_uninstall) do - applications = nil - - Clusters::Cluster::APPLICATIONS.each do |application_name, klass| - next if application_name == 'helm' - - extra_apps = Clusters::Applications::Helm.where('EXISTS (?)', klass.select(1).where(cluster_id: cluster_id)) - - applications = applications ? applications.or(extra_apps) : extra_apps - end - - !applications.exists? 
- end + # The legacy Tiller server is not installable, which is the initial status of every app end + # DEPRECATED: This command is only for development and testing purposes, to simulate + # a Helm 2 cluster with an existing Tiller server. def install_command Gitlab::Kubernetes::Helm::V2::InitCommand.new( name: name, @@ -70,13 +52,6 @@ module Clusters ca_key.present? && ca_cert.present? end - def post_uninstall - cluster.kubeclient.delete_namespace(Gitlab::Kubernetes::Helm::NAMESPACE) - rescue Kubeclient::ResourceNotFoundError - # we actually don't care if the namespace is not present - # since we want to delete it anyway. - end - private def files diff --git a/app/models/clusters/applications/runner.rb b/app/models/clusters/applications/runner.rb index 03f4caccccd..1e41b6f4f31 100644 --- a/app/models/clusters/applications/runner.rb +++ b/app/models/clusters/applications/runner.rb @@ -3,7 +3,7 @@ module Clusters module Applications class Runner < ApplicationRecord - VERSION = '0.22.0' + VERSION = '0.23.0' self.table_name = 'clusters_applications_runners' diff --git a/app/models/clusters/cluster.rb b/app/models/clusters/cluster.rb index 3cf5542ae76..a34d8a6b98d 100644 --- a/app/models/clusters/cluster.rb +++ b/app/models/clusters/cluster.rb @@ -149,8 +149,8 @@ module Clusters scope :for_project_namespace, -> (namespace_id) { joins(:projects).where(projects: { namespace_id: namespace_id }) } scope :with_application_prometheus, -> { includes(:application_prometheus).joins(:application_prometheus) } - scope :with_project_alert_service_data, -> (project_ids) do - conditions = { projects: { alerts_service: [:data] } } + scope :with_project_http_integrations, -> (project_ids) do + conditions = { projects: :alert_management_http_integrations } includes(conditions).joins(conditions).where(projects: { id: project_ids }) end diff --git a/app/models/clusters/concerns/application_core.rb b/app/models/clusters/concerns/application_core.rb index b82b1887308..ad6699daa78 100644 
--- a/app/models/clusters/concerns/application_core.rb +++ b/app/models/clusters/concerns/application_core.rb @@ -62,6 +62,14 @@ module Clusters end end + def uninstall_command + helm_command_module::DeleteCommand.new( + name: name, + rbac: cluster.platform_kubernetes_rbac?, + files: files + ) + end + def prepare_uninstall # Override if your application needs any action before # being uninstalled by Helm diff --git a/app/models/clusters/concerns/application_data.rb b/app/models/clusters/concerns/application_data.rb index 00aeb7669ad..a022f174faf 100644 --- a/app/models/clusters/concerns/application_data.rb +++ b/app/models/clusters/concerns/application_data.rb @@ -3,14 +3,6 @@ module Clusters module Concerns module ApplicationData - def uninstall_command - helm_command_module::DeleteCommand.new( - name: name, - rbac: cluster.platform_kubernetes_rbac?, - files: files - ) - end - def repository nil end diff --git a/app/models/clusters/platforms/kubernetes.rb b/app/models/clusters/platforms/kubernetes.rb index b85a902d58b..84de5828491 100644 --- a/app/models/clusters/platforms/kubernetes.rb +++ b/app/models/clusters/platforms/kubernetes.rb @@ -94,9 +94,20 @@ module Clusters return unless enabled? pods = read_pods(environment.deployment_namespace) + deployments = read_deployments(environment.deployment_namespace) - # extract_relevant_pod_data avoids uploading all the pod info into ReactiveCaching - { pods: extract_relevant_pod_data(pods) } + ingresses = if ::Feature.enabled?(:canary_ingress_weight_control, environment.project, default_enabled: true) + read_ingresses(environment.deployment_namespace) + else + [] + end + + # extract only the data required for display to avoid unnecessary caching + { + pods: extract_relevant_pod_data(pods), + deployments: extract_relevant_deployment_data(deployments), + ingresses: extract_relevant_ingress_data(ingresses) + } end def terminals(environment, data) @@ -109,6 +120,25 @@ module Clusters @kubeclient ||= build_kube_client! 
end + def rollout_status(environment, data) + project = environment.project + + deployments = filter_by_project_environment(data[:deployments], project.full_path_slug, environment.slug) + pods = filter_by_project_environment(data[:pods], project.full_path_slug, environment.slug) + ingresses = data[:ingresses].presence || [] + + ::Gitlab::Kubernetes::RolloutStatus.from_deployments(*deployments, pods_attrs: pods, ingresses: ingresses) + end + + def ingresses(namespace) + ingresses = read_ingresses(namespace) + ingresses.map { |ingress| ::Gitlab::Kubernetes::Ingress.new(ingress) } + end + + def patch_ingress(namespace, ingress, data) + kubeclient.patch_ingress(ingress.name, data, namespace) + end + private def default_namespace(project, environment_name:) @@ -140,6 +170,18 @@ module Clusters [] end + def read_deployments(namespace) + kubeclient.get_deployments(namespace: namespace).as_json + rescue Kubeclient::ResourceNotFoundError + [] + end + + def read_ingresses(namespace) + kubeclient.get_ingresses(namespace: namespace).as_json + rescue Kubeclient::ResourceNotFoundError + [] + end + def build_kube_client! raise "Incomplete settings" unless api_url @@ -231,8 +273,24 @@ module Clusters } end end + + def extract_relevant_deployment_data(deployments) + deployments.map do |deployment| + { + 'metadata' => deployment.fetch('metadata', {}).slice('name', 'generation', 'labels', 'annotations'), + 'spec' => deployment.fetch('spec', {}).slice('replicas'), + 'status' => deployment.fetch('status', {}).slice('observedGeneration') + } + end + end + + def extract_relevant_ingress_data(ingresses) + ingresses.map do |ingress| + { + 'metadata' => ingress.fetch('metadata', {}).slice('name', 'labels', 'annotations') + } + end + end end end end - -Clusters::Platforms::Kubernetes.prepend_if_ee('EE::Clusters::Platforms::Kubernetes') |