author     GitLab Bot <gitlab-bot@gitlab.com>  2019-11-27 18:06:30 +0000
committer  GitLab Bot <gitlab-bot@gitlab.com>  2019-11-27 18:06:30 +0000
commit     20758bc3b33c04121d214cb89646baf880037a27
tree       66b821724d9e050002a956acefc1111c2ef0feab
parent     a98649b71085bcd21af674a47d6a746336c56a65
download   gitlab-ce-20758bc3b33c04121d214cb89646baf880037a27.tar.gz
Add latest changes from gitlab-org/gitlab@master
19 files changed, 289 insertions, 116 deletions
diff --git a/app/assets/javascripts/monitoring/components/dashboard.vue b/app/assets/javascripts/monitoring/components/dashboard.vue index 26e2c2568c1..91aa5cf0e7b 100644 --- a/app/assets/javascripts/monitoring/components/dashboard.vue +++ b/app/assets/javascripts/monitoring/components/dashboard.vue @@ -22,8 +22,7 @@ import MonitorTimeSeriesChart from './charts/time_series.vue'; import MonitorSingleStatChart from './charts/single_stat.vue'; import GraphGroup from './graph_group.vue'; import EmptyState from './empty_state.vue'; -import TrackEventDirective from '~/vue_shared/directives/track_event'; -import { getTimeDiff, isValidDate, downloadCSVOptions, generateLinkToChartOptions } from '../utils'; +import { getTimeDiff, isValidDate } from '../utils'; export default { components: { @@ -44,7 +43,6 @@ export default { directives: { GlModal: GlModalDirective, GlTooltip: GlTooltipDirective, - TrackEvent: TrackEventDirective, }, props: { externalDashboardUrl: { @@ -300,8 +298,6 @@ export default { onDateTimePickerApply(timeWindowUrlParams) { return redirectTo(mergeUrlParams(timeWindowUrlParams, window.location.href)); }, - downloadCSVOptions, - generateLinkToChartOptions, }, addMetric: { title: s__('Metrics|Add metric'), diff --git a/app/assets/stylesheets/page_bundles/ide.scss b/app/assets/stylesheets/page_bundles/ide.scss index ba126d59eef..977fc8329b6 100644 --- a/app/assets/stylesheets/page_bundles/ide.scss +++ b/app/assets/stylesheets/page_bundles/ide.scss @@ -883,6 +883,15 @@ $ide-commit-header-height: 48px; margin-right: $ide-tree-padding; border-bottom: 1px solid $white-dark; + svg { + color: $gray-700; + + &:focus, + &:hover { + color: $blue-600; + } + } + .ide-new-btn { margin-left: auto; } @@ -899,6 +908,11 @@ $ide-commit-header-height: 48px; .dropdown-menu-toggle { svg { vertical-align: middle; + color: $gray-700; + + &:hover { + color: $gray-700; + } } &:hover { diff --git a/app/assets/stylesheets/pages/issues.scss b/app/assets/stylesheets/pages/issues.scss index a37cbda8558..61542e89828 100644 --- a/app/assets/stylesheets/pages/issues.scss +++ b/app/assets/stylesheets/pages/issues.scss @@ -21,16 +21,11 @@ margin-bottom: 2px; } - .issue-labels { + .issue-labels, + .author-link { display: inline-block; } - .issuable-meta { - .author-link { - display: inline-block; - } - } - .icon-merge-request-unmerged { height: 13px; margin-bottom: 3px; @@ -53,16 +48,6 @@ margin-right: 15px; } -.issues_content { - .title { - height: 40px; - } - - form { - margin: 0; - } -} - form.edit-issue { margin: 0; } @@ -79,10 +64,6 @@ ul.related-merge-requests > li { margin-left: 5px; } - .row_title { - vertical-align: bottom; - } - gl-emoji { font-size: 1em; } @@ -93,10 +74,6 @@ ul.related-merge-requests > li { font-weight: $gl-font-weight-bold; } -.merge-request-id { - display: inline-block; -} - .merge-request-status { &.merged { color: $blue-500; @@ -118,11 +95,7 @@ ul.related-merge-requests > li { border-color: $issues-today-border; } - &.closed { - background: $gray-light; - border-color: $border-color; - } - + &.closed, &.merged { background: $gray-light; border-color: $border-color; @@ -160,9 +133,12 @@ ul.related-merge-requests > li { padding-bottom: 37px; } -.issues-nav-controls { +.issues-nav-controls, +.new-branch-col { font-size: 0; +} +.issues-nav-controls { .btn-group:empty { display: none; } @@ -198,8 +174,6 @@ ul.related-merge-requests > li { } .new-branch-col { - font-size: 0; - .discussion-filter-container { &:not(:only-child) { margin-right: $gl-padding-8; @@ -297,11 +271,11 @@ 
ul.related-merge-requests > li { padding-top: 0; align-self: center; } + } - .create-mr-dropdown-wrap { - .btn-group:not(.hidden) { - display: inline-flex; - } + .create-mr-dropdown-wrap { + .btn-group:not(.hidden) { + display: inline-flex; } } } diff --git a/app/helpers/projects_helper.rb b/app/helpers/projects_helper.rb index 466c782fc77..ff013f3f7ec 100644 --- a/app/helpers/projects_helper.rb +++ b/app/helpers/projects_helper.rb @@ -114,8 +114,10 @@ module ProjectsHelper source = visible_fork_source(project) if source - _('This will remove the fork relationship between this project and %{fork_source}.') % + msg = _('This will remove the fork relationship between this project and %{fork_source}.') % { fork_source: link_to(source.full_name, project_path(source)) } + + msg.html_safe else _('This will remove the fork relationship between this project and other projects in the fork network.') end diff --git a/app/models/active_session.rb b/app/models/active_session.rb index 00192b1da59..08352caca07 100644 --- a/app/models/active_session.rb +++ b/app/models/active_session.rb @@ -4,6 +4,7 @@ class ActiveSession include ActiveModel::Model SESSION_BATCH_SIZE = 200 + ALLOWED_NUMBER_OF_ACTIVE_SESSIONS = 100 attr_accessor :created_at, :updated_at, :session_id, :ip_address, @@ -65,21 +66,22 @@ class ActiveSession def self.destroy(user, session_id) Gitlab::Redis::SharedState.with do |redis| - redis.srem(lookup_key_name(user.id), session_id) + destroy_sessions(redis, user, [session_id]) + end + end - deleted_keys = redis.del(key_name(user.id, session_id)) + def self.destroy_sessions(redis, user, session_ids) + key_names = session_ids.map {|session_id| key_name(user.id, session_id) } + session_names = session_ids.map {|session_id| "#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id}" } - # only allow deleting the devise session if we could actually find a - # related active session. this prevents another user from deleting - # someone else's session. - if deleted_keys > 0 - redis.del("#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id}") - end - end + redis.srem(lookup_key_name(user.id), session_ids) + redis.del(key_names) + redis.del(session_names) end def self.cleanup(user) Gitlab::Redis::SharedState.with do |redis| + clean_up_old_sessions(redis, user) cleaned_up_lookup_entries(redis, user) end end @@ -118,19 +120,39 @@ class ActiveSession end end - def self.raw_active_session_entries(session_ids, user_id) + def self.raw_active_session_entries(redis, session_ids, user_id) return [] if session_ids.empty? - Gitlab::Redis::SharedState.with do |redis| - entry_keys = session_ids.map { |session_id| key_name(user_id, session_id) } + entry_keys = session_ids.map { |session_id| key_name(user_id, session_id) } + + redis.mget(entry_keys) + end - redis.mget(entry_keys) + def self.active_session_entries(session_ids, user_id, redis) + return [] if session_ids.empty? + + entry_keys = raw_active_session_entries(redis, session_ids, user_id) + + entry_keys.map do |raw_session| + Marshal.load(raw_session) # rubocop:disable Security/MarshalLoad end end + def self.clean_up_old_sessions(redis, user) + session_ids = session_ids_for_user(user.id) + + return if session_ids.count <= ALLOWED_NUMBER_OF_ACTIVE_SESSIONS + + # remove sessions if there are more than ALLOWED_NUMBER_OF_ACTIVE_SESSIONS. + sessions = active_session_entries(session_ids, user.id, redis) + sessions.sort_by! {|session| session.updated_at }.reverse! 
+ sessions = sessions[ALLOWED_NUMBER_OF_ACTIVE_SESSIONS..-1].map { |session| session.session_id } + destroy_sessions(redis, user, sessions) + end + def self.cleaned_up_lookup_entries(redis, user) session_ids = session_ids_for_user(user.id) - entries = raw_active_session_entries(session_ids, user.id) + entries = raw_active_session_entries(redis, session_ids, user.id) # remove expired keys. # only the single key entries are automatically expired by redis, the diff --git a/changelogs/unreleased/31611-limit-the-number-of-stored-sessions-per-user.yml b/changelogs/unreleased/31611-limit-the-number-of-stored-sessions-per-user.yml new file mode 100644 index 00000000000..aac9e94e1cf --- /dev/null +++ b/changelogs/unreleased/31611-limit-the-number-of-stored-sessions-per-user.yml @@ -0,0 +1,5 @@ +--- +title: Resolve Limit the number of stored sessions per user +merge_request: 19325 +author: +type: added diff --git a/changelogs/unreleased/34377-design-view-download-single-issue-design-image.yml b/changelogs/unreleased/34377-design-view-download-single-issue-design-image.yml new file mode 100644 index 00000000000..67e37e39dc0 --- /dev/null +++ b/changelogs/unreleased/34377-design-view-download-single-issue-design-image.yml @@ -0,0 +1,5 @@ +--- +title: 'Resolve Design view: Download single issue design image' +merge_request: 20703 +author: +type: added diff --git a/changelogs/unreleased/cleanup-monitoring-dashboard-unused-methods.yml b/changelogs/unreleased/cleanup-monitoring-dashboard-unused-methods.yml new file mode 100644 index 00000000000..f4dac5e2e0f --- /dev/null +++ b/changelogs/unreleased/cleanup-monitoring-dashboard-unused-methods.yml @@ -0,0 +1,5 @@ +--- +title: Removed unused methods in monitoring dashboard +merge_request: 20819 +author: +type: other diff --git a/changelogs/unreleased/fix-fork-link-display-bug.yml b/changelogs/unreleased/fix-fork-link-display-bug.yml new file mode 100644 index 00000000000..2a8f2b1a38d --- /dev/null +++ b/changelogs/unreleased/fix-fork-link-display-bug.yml @@ -0,0 +1,5 @@ +--- +title: Fix a display bug in the fork removal description message +merge_request: 20843 +author: +type: fixed diff --git a/doc/administration/high_availability/README.md b/doc/administration/high_availability/README.md index 81719ba51da..75874b9b3a9 100644 --- a/doc/administration/high_availability/README.md +++ b/doc/administration/high_availability/README.md @@ -219,13 +219,43 @@ Note that your exact needs may be more, depending on your workload. Your workload is influenced by factors such as - but not limited to - how active your users are, how much automation you use, mirroring, and repo/change size. +### 5,000 User Configuration + +- **Supported Users (approximate):** 5,000 +- **Test RPS Rates:** API: 100 RPS, Web: 10 RPS, Git: 10 RPS +- **Status:** Work-in-progress +- **Known Issues:** For the latest list of known performance issues head +[here](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues). + +NOTE: **Note:** This architecture is a work-in-progress. The +Quality team will be certifying this environment in late 2019 or early 2020. The specifications +may be adjusted prior to certification based on performance testing. 
+ +| Service | Nodes | Configuration | GCP type | +| ----------------------------|-------|-----------------------|---------------| +| GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 16 threads | 3 | 16 vCPU, 14.4GB Memory | n1-highcpu-16 | +| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | +| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | +| Gitaly <br> - Gitaly Ruby workers on each node set to 20% of available CPUs | X[^1] . | 8 vCPU, 30GB Memory | n1-standard-8 | +| Redis Cache + Sentinel <br> - Cache maxmemory set to 90% of available memory | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | +| Redis Persistent + Sentinel | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 | +| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 | +| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | +| NFS Server[^4] . | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 | +| S3 Object Storage[^3] . | - | - | - | +| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | +| External load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | +| Internal load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 | + +NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud +vendors a best effort like for like can be used. + ### 10,000 User Configuration - **Supported Users (approximate):** 10,000 - **Test RPS Rates:** API: 200 RPS, Web: 20 RPS, Git: 20 RPS -- **Known Issues:** While validating the reference architectures, slow API -endpoints were discovered. For details, see the related issues list in -[this issue](https://gitlab.com/gitlab-org/quality/performance/issues/125). +- **Known Issues:** For the latest list of known performance issues head +[here](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues). | Service | Nodes | Configuration | GCP type | | ----------------------------|-------|-----------------------|---------------| @@ -250,9 +280,8 @@ vendors a best effort like for like can be used. - **Supported Users (approximate):** 25,000 - **Test RPS Rates:** API: 500 RPS, Web: 50 RPS, Git: 50 RPS -- **Known Issues:** While validating the reference architectures, slow API -endpoints were discovered. For details, see the related issues list in -[this issue](https://gitlab.com/gitlab-org/quality/performance/issues/125). +- **Known Issues:** For the latest list of known performance issues head +[here](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues). | Service | Nodes | Configuration | GCP type | | ----------------------------|-------|-----------------------|---------------| @@ -277,9 +306,8 @@ vendors a best effort like for like can be used. - **Supported Users (approximate):** 50,000 - **Test RPS Rates:** API: 1000 RPS, Web: 100 RPS, Git: 100 RPS -- **Known Issues:** While validating the reference architectures, slow API -endpoints were discovered. For details, see the related issues list in -[this issue](https://gitlab.com/gitlab-org/quality/performance/issues/125). +- **Known Issues:** For the latest list of known performance issues head +[here](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues). | Service | Nodes | Configuration | GCP type | | ----------------------------|-------|-----------------------|---------------| @@ -300,15 +328,16 @@ endpoints were discovered. For details, see the related issues list in NOTE: **Note:** Memory values are given directly by GCP machine sizes. 
On different cloud vendors a best effort like for like can be used. -[^1]: Gitaly node requirements are dependent on customer data. We recommend 2 - nodes as an absolute minimum for performance at the 10,000 and 25,000 user - scale and 4 nodes as an absolute minimum at the 50,000 user scale, but - additional nodes should be considered in conjunction with a review of - project counts and sizes. +[^1]: Gitaly node requirements are dependent on customer data, specifically the number of + projects and their sizes. We recommend 2 nodes as an absolute minimum for HA environments + and at least 4 nodes should be used when supporting 50,000 or more users. + We recommend that each Gitaly node should store no more than 5TB of data. + Additional nodes should be considered in conjunction with a review of expected + data size and spread based on the recommendations above. [^2]: Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/) as the load balancer. However other reputable load balancers with similar feature sets - should also work here but be aware these aren't validated. + should also work instead but be aware these aren't validated. [^3]: For data objects such as LFS, Uploads, Artifacts, etc... We recommend a S3 Object Storage where possible over NFS due to better performance and availability. Several types of objects diff --git a/doc/user/profile/active_sessions.md b/doc/user/profile/active_sessions.md index fe2eeebdb99..8a70799f5a0 100644 --- a/doc/user/profile/active_sessions.md +++ b/doc/user/profile/active_sessions.md @@ -18,6 +18,9 @@ review the sessions, and revoke any you don't recognize. ![Active sessions list](img/active_sessions_list.png) +CAUTION: **Caution:** +It is currently possible to have up to 100 active sessions at once. If the number of active sessions exceeds 100, the oldest ones will be deleted. + <!-- ## Troubleshooting Include any troubleshooting steps that you can foresee. 
If you know beforehand what issues diff --git a/lib/quality/kubernetes_client.rb b/lib/quality/kubernetes_client.rb index cc899bf9374..db21c0b013b 100644 --- a/lib/quality/kubernetes_client.rb +++ b/lib/quality/kubernetes_client.rb @@ -4,6 +4,7 @@ require_relative '../gitlab/popen' unless defined?(Gitlab::Popen) module Quality class KubernetesClient + RESOURCE_LIST = 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd' CommandFailedError = Class.new(StandardError) attr_reader :namespace @@ -13,6 +14,13 @@ module Quality end def cleanup(release_name:, wait: true) + delete_by_selector(release_name: release_name, wait: wait) + delete_by_matching_name(release_name: release_name) + end + + private + + def delete_by_selector(release_name:, wait:) selector = case release_name when String %(-l release="#{release_name}") @@ -23,9 +31,9 @@ module Quality end command = [ - %(--namespace "#{namespace}"), 'delete', - 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa', + RESOURCE_LIST, + %(--namespace "#{namespace}"), '--now', '--ignore-not-found', '--include-uninitialized', @@ -36,7 +44,29 @@ module Quality run_command(command) end - private + def delete_by_matching_name(release_name:) + resource_names = raw_resource_names + command = [ + 'delete', + %(--namespace "#{namespace}") + ] + + Array(release_name).each do |release| + resource_names + .select { |resource_name| resource_name.include?(release) } + .each { |matching_resource| run_command(command + [matching_resource]) } + end + end + + def raw_resource_names + command = [ + 'get', + RESOURCE_LIST, + %(--namespace "#{namespace}"), + '-o custom-columns=NAME:.metadata.name' + ] + run_command(command).lines.map(&:strip) + end def run_command(command) final_command = ['kubectl', *command].join(' ') diff --git a/lib/tasks/gitlab/cleanup.rake b/lib/tasks/gitlab/cleanup.rake index 0a0ee7b4bfa..63f5d7f2740 100644 --- a/lib/tasks/gitlab/cleanup.rake +++ b/lib/tasks/gitlab/cleanup.rake @@ -92,7 +92,7 @@ namespace :gitlab do lookup_key_count = redis.scard(key) session_ids = ActiveSession.session_ids_for_user(user_id) - entries = ActiveSession.raw_active_session_entries(session_ids, user_id) + entries = ActiveSession.raw_active_session_entries(redis, session_ids, user_id) session_ids_and_entries = session_ids.zip(entries) inactive_session_ids = session_ids_and_entries.map do |session_id, session| diff --git a/scripts/review_apps/review-apps.sh b/scripts/review_apps/review-apps.sh index 1c33bff719d..62360dfe298 100755 --- a/scripts/review_apps/review-apps.sh +++ b/scripts/review_apps/review-apps.sh @@ -48,11 +48,31 @@ function delete_release() { return fi - echoinfo "Deleting release '${release}'..." true + helm_delete_release "${namespace}" "${release}" + kubectl_cleanup_release "${namespace}" "${release}" +} + +function helm_delete_release() { + local namespace="${1}" + local release="${2}" + + echoinfo "Deleting Helm release '${release}'..." true helm delete --tiller-namespace "${namespace}" --purge "${release}" } +function kubectl_cleanup_release() { + local namespace="${1}" + local release="${2}" + + echoinfo "Deleting all K8s resources matching '${release}'..." 
true + kubectl --namespace "${namespace}" get ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd 2>&1 \ + | grep "${release}" \ + | awk '{print $1}' \ + | xargs kubectl --namespace "${namespace}" delete \ + || true +} + function delete_failed_release() { local namespace="${KUBE_NAMESPACE}" local release="${CI_ENVIRONMENT_SLUG}" diff --git a/scripts/trigger-build b/scripts/trigger-build index 74c1df258c0..537b2692b27 100755 --- a/scripts/trigger-build +++ b/scripts/trigger-build @@ -71,7 +71,7 @@ module Trigger # Can be overridden def version_param_value(version_file) - File.read(version_file).strip + ENV[version_file]&.strip || File.read(version_file).strip end def variables diff --git a/spec/lib/gitlab/health_checks/probes/collection_spec.rb b/spec/lib/gitlab/health_checks/probes/collection_spec.rb index 33efc640257..d8c411fa27b 100644 --- a/spec/lib/gitlab/health_checks/probes/collection_spec.rb +++ b/spec/lib/gitlab/health_checks/probes/collection_spec.rb @@ -5,7 +5,7 @@ require 'spec_helper' describe Gitlab::HealthChecks::Probes::Collection do let(:readiness) { described_class.new(*checks) } - describe '#call' do + describe '#execute' do subject { readiness.execute } context 'with all checks' do diff --git a/spec/lib/quality/kubernetes_client_spec.rb b/spec/lib/quality/kubernetes_client_spec.rb index 5bac102ac41..59d4a977d5e 100644 --- a/spec/lib/quality/kubernetes_client_spec.rb +++ b/spec/lib/quality/kubernetes_client_spec.rb @@ -5,15 +5,27 @@ require 'fast_spec_helper' RSpec.describe Quality::KubernetesClient do let(:namespace) { 'review-apps-ee' } let(:release_name) { 'my-release' } + let(:pod_for_release) { "pod-my-release-abcd" } + let(:raw_resource_names_str) { "NAME\nfoo\n#{pod_for_release}\nbar" } + let(:raw_resource_names) { raw_resource_names_str.lines.map(&:strip) } subject { described_class.new(namespace: namespace) } + describe 'RESOURCE_LIST' do + it 'returns the correct list of resources separated by commas' do + expect(described_class::RESOURCE_LIST).to eq('ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd') + end + end + describe '#cleanup' do + before do + allow(subject).to receive(:raw_resource_names).and_return(raw_resource_names) + end + it 'raises an error if the Kubernetes command fails' do expect(Gitlab::Popen).to receive(:popen_with_detail) - .with([%(kubectl --namespace "#{namespace}" delete ) \ - 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \ - "--now --ignore-not-found --include-uninitialized --wait=true -l release=\"#{release_name}\""]) + .with(["kubectl delete #{described_class::RESOURCE_LIST} " + + %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l release="#{release_name}")]) .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false))) expect { subject.cleanup(release_name: release_name) }.to raise_error(described_class::CommandFailedError) @@ -21,9 +33,12 @@ RSpec.describe Quality::KubernetesClient do it 'calls kubectl with the correct arguments' do expect(Gitlab::Popen).to receive(:popen_with_detail) - .with([%(kubectl --namespace "#{namespace}" delete ) \ - 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \ - "--now --ignore-not-found --include-uninitialized --wait=true -l 
release=\"#{release_name}\""]) + .with(["kubectl delete #{described_class::RESOURCE_LIST} " + + %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l release="#{release_name}")]) + .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true))) + + expect(Gitlab::Popen).to receive(:popen_with_detail) + .with([%(kubectl delete --namespace "#{namespace}" #{pod_for_release})]) .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true))) # We're not verifying the output here, just silencing it @@ -35,20 +50,22 @@ RSpec.describe Quality::KubernetesClient do it 'raises an error if the Kubernetes command fails' do expect(Gitlab::Popen).to receive(:popen_with_detail) - .with([%(kubectl --namespace "#{namespace}" delete ) \ - 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \ - "--now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})'"]) - .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false))) + .with(["kubectl delete #{described_class::RESOURCE_LIST} " + + %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})')]) + .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false))) expect { subject.cleanup(release_name: release_name) }.to raise_error(described_class::CommandFailedError) end it 'calls kubectl with the correct arguments' do expect(Gitlab::Popen).to receive(:popen_with_detail) - .with([%(kubectl --namespace "#{namespace}" delete ) \ - 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \ - "--now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})'"]) - .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true))) + .with(["kubectl delete #{described_class::RESOURCE_LIST} " + + %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})')]) + .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true))) + + expect(Gitlab::Popen).to receive(:popen_with_detail) + .with([%(kubectl delete --namespace "#{namespace}" #{pod_for_release})]) + .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true))) # We're not verifying the output here, just silencing it expect { subject.cleanup(release_name: release_name) }.to output.to_stdout @@ -58,24 +75,37 @@ RSpec.describe Quality::KubernetesClient do context 'with `wait: false`' do it 'raises an error if the Kubernetes command fails' do expect(Gitlab::Popen).to receive(:popen_with_detail) - .with([%(kubectl --namespace "#{namespace}" delete ) \ - 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \ - "--now --ignore-not-found --include-uninitialized --wait=false -l release=\"#{release_name}\""]) - .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false))) + .with(["kubectl delete #{described_class::RESOURCE_LIST} " + + %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=false -l release="#{release_name}")]) + .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false))) expect { subject.cleanup(release_name: release_name, wait: false) }.to 
raise_error(described_class::CommandFailedError) end it 'calls kubectl with the correct arguments' do expect(Gitlab::Popen).to receive(:popen_with_detail) - .with([%(kubectl --namespace "#{namespace}" delete ) \ - 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \ - "--now --ignore-not-found --include-uninitialized --wait=false -l release=\"#{release_name}\""]) - .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true))) + .with(["kubectl delete #{described_class::RESOURCE_LIST} " + + %(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=false -l release="#{release_name}")]) + .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true))) + + expect(Gitlab::Popen).to receive(:popen_with_detail) + .with([%(kubectl delete --namespace "#{namespace}" #{pod_for_release})]) + .and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true))) # We're not verifying the output here, just silencing it expect { subject.cleanup(release_name: release_name, wait: false) }.to output.to_stdout end end end + + describe '#raw_resource_names' do + it 'calls kubectl to retrieve the resource names' do + expect(Gitlab::Popen).to receive(:popen_with_detail) + .with(["kubectl get #{described_class::RESOURCE_LIST} " + + %(--namespace "#{namespace}" -o custom-columns=NAME:.metadata.name)]) + .and_return(Gitlab::Popen::Result.new([], raw_resource_names_str, '', double(success?: true))) + + expect(subject.__send__(:raw_resource_names)).to eq(raw_resource_names) + end + end end diff --git a/spec/models/active_session_spec.rb b/spec/models/active_session_spec.rb index b1f93dc7189..07f716c4f38 100644 --- a/spec/models/active_session_spec.rb +++ b/spec/models/active_session_spec.rb @@ -242,23 +242,13 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do expect(redis.scan_each(match: "session:gitlab:*").to_a).to be_empty end end - - it 'does not remove the devise session if the active session could not be found' do - Gitlab::Redis::SharedState.with do |redis| - redis.set("session:gitlab:6919a6f1bb119dd7396fadc38fd18d0d", '') - end - - other_user = create(:user) - - ActiveSession.destroy(other_user, request.session.id) - - Gitlab::Redis::SharedState.with do |redis| - expect(redis.scan_each(match: "session:gitlab:*").to_a).not_to be_empty - end - end end describe '.cleanup' do + before do + stub_const("ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS", 5) + end + it 'removes obsolete lookup entries' do Gitlab::Redis::SharedState.with do |redis| redis.set("session:user:gitlab:#{user.id}:6919a6f1bb119dd7396fadc38fd18d0d", '') @@ -276,5 +266,47 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do it 'does not bail if there are no lookup entries' do ActiveSession.cleanup(user) end + + context 'cleaning up old sessions' do + let(:max_number_of_sessions_plus_one) { ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS + 1 } + let(:max_number_of_sessions_plus_two) { ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS + 2 } + + before do + Gitlab::Redis::SharedState.with do |redis| + (1..max_number_of_sessions_plus_two).each do |number| + redis.set( + "session:user:gitlab:#{user.id}:#{number}", + Marshal.dump(ActiveSession.new(session_id: "#{number}", updated_at: number.days.ago)) + ) + redis.sadd( + "session:lookup:user:gitlab:#{user.id}", + "#{number}" + ) + end + end + end + + it 'removes obsolete active sessions entries' do + ActiveSession.cleanup(user) + + 
Gitlab::Redis::SharedState.with do |redis| + sessions = redis.scan_each(match: "session:user:gitlab:#{user.id}:*").to_a + + expect(sessions.count).to eq(ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS) + expect(sessions).not_to include("session:user:gitlab:#{user.id}:#{max_number_of_sessions_plus_one}", "session:user:gitlab:#{user.id}:#{max_number_of_sessions_plus_two}") + end + end + + it 'removes obsolete lookup entries' do + ActiveSession.cleanup(user) + + Gitlab::Redis::SharedState.with do |redis| + lookup_entries = redis.smembers("session:lookup:user:gitlab:#{user.id}") + + expect(lookup_entries.count).to eq(ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS) + expect(lookup_entries).not_to include(max_number_of_sessions_plus_one.to_s, max_number_of_sessions_plus_two.to_s) + end + end + end end end diff --git a/spec/views/projects/edit.html.haml_spec.rb b/spec/views/projects/edit.html.haml_spec.rb index 40927a22dc4..8005b549838 100644 --- a/spec/views/projects/edit.html.haml_spec.rb +++ b/spec/views/projects/edit.html.haml_spec.rb @@ -53,6 +53,7 @@ describe 'projects/edit' do render expect(rendered).to have_content('Remove fork relationship') + expect(rendered).to have_link(source_project.full_name, href: project_path(source_project)) end it 'hides the fork relationship settings from an unauthorized user' do @@ -78,7 +79,7 @@ describe 'projects/edit' do render expect(rendered).to have_content('Remove fork relationship') - expect(rendered).to have_content(source_project.full_name) + expect(rendered).to have_link(source_project.full_name, href: project_path(source_project)) end end end |
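
The behavioural core of this commit is the new per-user session cap in `ActiveSession`. Below is a minimal usage sketch, not part of the patch, of how the capped cleanup is expected to behave; the method names and the `ALLOWED_NUMBER_OF_ACTIVE_SESSIONS` constant come from the diff above, while the `User.find` lookup is purely illustrative.

```ruby
# Illustrative sketch only, assuming the ActiveSession changes from
# app/models/active_session.rb in this commit.
user = User.find(user_id) # hypothetical lookup of any signed-in user

# ActiveSession.cleanup now also trims old sessions: if the user has more
# than ALLOWED_NUMBER_OF_ACTIVE_SESSIONS (100) entries in Redis, the oldest
# ones (ordered by updated_at) are destroyed along with their lookup entries.
ActiveSession.cleanup(user)

# After cleanup, at most 100 session ids remain registered for the user.
ActiveSession.session_ids_for_user(user.id).count # => at most 100
```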