44 files changed, 993 insertions(+), 63 deletions(-)
diff --git a/app/assets/javascripts/clusters/components/applications.vue b/app/assets/javascripts/clusters/components/applications.vue index 665a9c77822..489615f1f78 100644 --- a/app/assets/javascripts/clusters/components/applications.vue +++ b/app/assets/javascripts/clusters/components/applications.vue @@ -84,6 +84,9 @@ export default { ingressExternalIp() { return this.applications.ingress.externalIp; }, + certManagerInstalled() { + return this.applications.cert_manager.status === APPLICATION_STATUS.INSTALLED; + }, ingressDescription() { const extraCostParagraph = sprintf( _.escape( @@ -130,9 +133,9 @@ export default { return sprintf( _.escape( s__( - `ClusterIntegration|cert-manager is a native Kubernetes certificate management controller that helps with issuing certificates. - Installing cert-manager on your cluster will issue a certificate by %{letsEncrypt} and ensure that certificates - are valid and up to date.`, + `ClusterIntegration|Cert-Manager is a native Kubernetes certificate management controller that helps with issuing certificates. + Installing Cert-Manager on your cluster will issue a certificate by %{letsEncrypt} and ensure that certificates + are valid and up-to-date.`, ), ), { @@ -259,6 +262,16 @@ export default { </span> </div> <input v-else type="text" class="form-control js-ip-address" readonly value="?" /> + <p class="form-text text-muted"> + {{ + s__(`ClusterIntegration|Point a wildcard DNS to this + generated IP address in order to access + your application after it has been deployed.`) + }} + <a :href="ingressDnsHelpPath" target="_blank" rel="noopener noreferrer"> + {{ __('More information') }} + </a> + </p> </div> <p v-if="!ingressExternalIp" class="settings-message js-no-ip-message"> @@ -272,17 +285,6 @@ export default { {{ __('More information') }} </a> </p> - - <p> - {{ - s__(`ClusterIntegration|Point a wildcard DNS to this - generated IP address in order to access - your application after it has been deployed.`) - }} - <a :href="ingressDnsHelpPath" target="_blank" rel="noopener noreferrer"> - {{ __('More information') }} - </a> - </p> </template> <div v-html="ingressDescription"></div> </div> @@ -295,10 +297,41 @@ export default { :status-reason="applications.cert_manager.statusReason" :request-status="applications.cert_manager.requestStatus" :request-reason="applications.cert_manager.requestReason" + :install-application-request-params="{ email: applications.cert_manager.email }" :disabled="!helmInstalled" title-link="https://cert-manager.readthedocs.io/en/latest/#" > - <div slot="description" v-html="certManagerDescription"></div> + <template> + <div slot="description"> + <p v-html="certManagerDescription"></p> + <div class="form-group"> + <label for="cert-manager-issuer-email"> + {{ s__('ClusterIntegration|Issuer Email') }} + </label> + <div class="input-group"> + <input + v-model="applications.cert_manager.email" + :readonly="certManagerInstalled" + type="text" + class="form-control js-email" + /> + </div> + <p class="form-text text-muted"> + {{ + s__(`ClusterIntegration|Issuers represent a certificate authority. + You must provide an email address for your Issuer. 
`) + }} + <a + href="http://docs.cert-manager.io/en/latest/reference/issuers.html?highlight=email" + target="_blank" + rel="noopener noreferrer" + > + {{ __('More information') }} + </a> + </p> + </div> + </div> + </template> </application-row> <application-row v-if="isProjectCluster" @@ -381,16 +414,17 @@ export default { /> </span> </div> + + <p v-if="ingressInstalled" class="form-text text-muted"> + {{ + s__(`ClusterIntegration|Replace this with your own hostname if you want. + If you do so, point hostname to Ingress IP Address from above.`) + }} + <a :href="ingressDnsHelpPath" target="_blank" rel="noopener noreferrer"> + {{ __('More information') }} + </a> + </p> </div> - <p v-if="ingressInstalled"> - {{ - s__(`ClusterIntegration|Replace this with your own hostname if you want. - If you do so, point hostname to Ingress IP Address from above.`) - }} - <a :href="ingressDnsHelpPath" target="_blank" rel="noopener noreferrer"> - {{ __('More information') }} - </a> - </p> </template> </div> </application-row> diff --git a/app/assets/javascripts/clusters/constants.js b/app/assets/javascripts/clusters/constants.js index 15cf4a56138..e31afadf186 100644 --- a/app/assets/javascripts/clusters/constants.js +++ b/app/assets/javascripts/clusters/constants.js @@ -24,3 +24,4 @@ export const REQUEST_FAILURE = 'request-failure'; export const INGRESS = 'ingress'; export const JUPYTER = 'jupyter'; export const KNATIVE = 'knative'; +export const CERT_MANAGER = 'cert_manager'; diff --git a/app/assets/javascripts/clusters/stores/clusters_store.js b/app/assets/javascripts/clusters/stores/clusters_store.js index 2d69da8eaec..c750daab112 100644 --- a/app/assets/javascripts/clusters/stores/clusters_store.js +++ b/app/assets/javascripts/clusters/stores/clusters_store.js @@ -1,5 +1,5 @@ import { s__ } from '../../locale'; -import { INGRESS, JUPYTER, KNATIVE } from '../constants'; +import { INGRESS, JUPYTER, KNATIVE, CERT_MANAGER } from '../constants'; export default class ClusterStore { constructor() { @@ -30,6 +30,7 @@ export default class ClusterStore { statusReason: null, requestStatus: null, requestReason: null, + email: null, }, runner: { title: s__('ClusterIntegration|GitLab Runner'), @@ -103,6 +104,9 @@ export default class ClusterStore { if (appId === INGRESS) { this.state.applications.ingress.externalIp = serverAppEntry.external_ip; + } else if (appId === CERT_MANAGER) { + this.state.applications.cert_manager.email = + this.state.applications.cert_manager.email || serverAppEntry.email; } else if (appId === JUPYTER) { this.state.applications.jupyter.hostname = serverAppEntry.hostname || diff --git a/app/controllers/clusters/applications_controller.rb b/app/controllers/clusters/applications_controller.rb index 250f42f3096..c4e7fc950f9 100644 --- a/app/controllers/clusters/applications_controller.rb +++ b/app/controllers/clusters/applications_controller.rb @@ -23,6 +23,6 @@ class Clusters::ApplicationsController < Clusters::BaseController end def create_cluster_application_params - params.permit(:application, :hostname) + params.permit(:application, :hostname, :email) end end diff --git a/app/models/clusters/applications/cert_manager.rb b/app/models/clusters/applications/cert_manager.rb index 077e2bda143..74ef7c7e145 100644 --- a/app/models/clusters/applications/cert_manager.rb +++ b/app/models/clusters/applications/cert_manager.rb @@ -14,6 +14,10 @@ module Clusters default_value_for :version, VERSION + default_value_for :email do |cert_manager| + cert_manager.cluster&.user&.email + end + validates :email, 
presence: true def chart diff --git a/app/models/pool_repository.rb b/app/models/pool_repository.rb index bad0e30ceb5..dbde00b5584 100644 --- a/app/models/pool_repository.rb +++ b/app/models/pool_repository.rb @@ -1,12 +1,89 @@ # frozen_string_literal: true +# The PoolRepository model is the database equivalent of an ObjectPool for Gitaly +# That is; PoolRepository is the record in the database, ObjectPool is the +# repository on disk class PoolRepository < ActiveRecord::Base include Shardable + include AfterCommitQueue + + has_one :source_project, class_name: 'Project' + validates :source_project, presence: true has_many :member_projects, class_name: 'Project' after_create :correct_disk_path + state_machine :state, initial: :none do + state :scheduled + state :ready + state :failed + + event :schedule do + transition none: :scheduled + end + + event :mark_ready do + transition [:scheduled, :failed] => :ready + end + + event :mark_failed do + transition all => :failed + end + + state all - [:ready] do + def joinable? + false + end + end + + state :ready do + def joinable? + true + end + end + + after_transition none: :scheduled do |pool, _| + pool.run_after_commit do + ::ObjectPool::CreateWorker.perform_async(pool.id) + end + end + + after_transition scheduled: :ready do |pool, _| + pool.run_after_commit do + ::ObjectPool::ScheduleJoinWorker.perform_async(pool.id) + end + end + end + + def create_object_pool + object_pool.create + end + + # The members of the pool should have fetched the missing objects to their own + # objects directory. If the caller fails to do so, data loss might occur + def delete_object_pool + object_pool.delete + end + + def link_repository(repository) + object_pool.link(repository.raw) + end + + # This RPC can cause data loss, as not all objects are present the local repository + # No execution path yet, will be added through: + # https://gitlab.com/gitlab-org/gitaly/issues/1415 + def delete_repository_alternate(repository) + object_pool.unlink_repository(repository.raw) + end + + def object_pool + @object_pool ||= Gitlab::Git::ObjectPool.new( + shard.name, + disk_path + '.git', + source_project.repository.raw) + end + private def correct_disk_path diff --git a/app/models/project.rb b/app/models/project.rb index 9e736a3b03c..f5dc58cd67f 100644 --- a/app/models/project.rb +++ b/app/models/project.rb @@ -1585,6 +1585,7 @@ class Project < ActiveRecord::Base import_state.remove_jid update_project_counter_caches after_create_default_branch + join_pool_repository refresh_markdown_cache! end @@ -1981,8 +1982,48 @@ class Project < ActiveRecord::Base Gitlab::CurrentSettings.max_attachment_size.megabytes.to_i end + def object_pool_params + return {} unless !forked? && git_objects_poolable? + + { + repository_storage: repository_storage, + pool_repository: pool_repository || create_new_pool_repository + } + end + + # Git objects are only poolable when the project is or has: + # - Hashed storage -> The object pool will have a remote to its members, using relative paths. + # If the repository path changes we would have to update the remote. + # - Public -> User will be able to fetch Git objects that might not exist + # in their own repository. + # - Repository -> Else the disk path will be empty, and there's nothing to pool + def git_objects_poolable? + hashed_storage?(:repository) && + public? && + repository_exists? 
&& + Gitlab::CurrentSettings.hashed_storage_enabled && + Feature.enabled?(:object_pools, self) + end + private + def create_new_pool_repository + pool = begin + create_or_find_pool_repository!(shard: Shard.by_name(repository_storage), source_project: self) + rescue ActiveRecord::RecordNotUnique + retry + end + + pool.schedule + pool + end + + def join_pool_repository + return unless pool_repository + + ObjectPool::JoinWorker.perform_async(pool_repository.id, self.id) + end + def use_hashed_storage if self.new_record? && Gitlab::CurrentSettings.hashed_storage_enabled self.storage_version = LATEST_STORAGE_VERSION diff --git a/app/serializers/cluster_application_entity.rb b/app/serializers/cluster_application_entity.rb index 2bd17e58086..7b1a0be75ca 100644 --- a/app/serializers/cluster_application_entity.rb +++ b/app/serializers/cluster_application_entity.rb @@ -6,4 +6,5 @@ class ClusterApplicationEntity < Grape::Entity expose :status_reason expose :external_ip, if: -> (e, _) { e.respond_to?(:external_ip) } expose :hostname, if: -> (e, _) { e.respond_to?(:hostname) } + expose :email, if: -> (e, _) { e.respond_to?(:email) } end diff --git a/app/services/clusters/applications/create_service.rb b/app/services/clusters/applications/create_service.rb index a89772e82dc..92c2c1b9834 100644 --- a/app/services/clusters/applications/create_service.rb +++ b/app/services/clusters/applications/create_service.rb @@ -20,7 +20,7 @@ module Clusters end if application.has_attribute?(:email) - application.email = current_user.email + application.email = params[:email] end if application.respond_to?(:oauth_application) diff --git a/app/services/projects/fork_service.rb b/app/services/projects/fork_service.rb index 8dc0e044875..91091c4393d 100644 --- a/app/services/projects/fork_service.rb +++ b/app/services/projects/fork_service.rb @@ -54,6 +54,8 @@ module Projects new_params[:avatar] = @project.avatar end + new_params.merge!(@project.object_pool_params) + new_project = CreateService.new(current_user, new_params).execute return new_project unless new_project.persisted? 
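The `Projects::ForkService` hunk above merges `@project.object_pool_params` into the fork's create params, so a fork of a poolable public project is tied to the shared pool from the moment it is created. Below is a minimal Rails console sketch of the values involved, assuming a placeholder project ID and that the `object_pools` feature flag is enabled for the source project:

```ruby
# Hypothetical console session; 42 is a placeholder project ID.
project = Project.find(42)

# True only for public, hashed-storage projects with an existing repository,
# hashed storage enabled instance-wide, and the :object_pools flag turned on.
project.git_objects_poolable?
# => true

# Returns the params ForkService merges into the new fork's attributes;
# lazily creates (and schedules) a PoolRepository if none exists yet.
project.object_pool_params
# => { repository_storage: "default",
#      pool_repository: #<PoolRepository id: 1, state: "scheduled", ...> }
```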
diff --git a/app/workers/all_queues.yml b/app/workers/all_queues.yml index 672c77539af..dfce00a10a1 100644 --- a/app/workers/all_queues.yml +++ b/app/workers/all_queues.yml @@ -85,6 +85,10 @@ - todos_destroyer:todos_destroyer_project_private - todos_destroyer:todos_destroyer_private_features +- object_pool:object_pool_create +- object_pool:object_pool_schedule_join +- object_pool:object_pool_join + - default - mailers # ActionMailer::DeliveryJob.queue_name diff --git a/app/workers/concerns/object_pool_queue.rb b/app/workers/concerns/object_pool_queue.rb new file mode 100644 index 00000000000..5b648df9c72 --- /dev/null +++ b/app/workers/concerns/object_pool_queue.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +## +# Concern for setting Sidekiq settings for the various ObjectPool queues +# +module ObjectPoolQueue + extend ActiveSupport::Concern + + included do + queue_namespace :object_pool + end +end diff --git a/app/workers/git_garbage_collect_worker.rb b/app/workers/git_garbage_collect_worker.rb index 2d381c6fd6c..d3628b23189 100644 --- a/app/workers/git_garbage_collect_worker.rb +++ b/app/workers/git_garbage_collect_worker.rb @@ -28,6 +28,8 @@ class GitGarbageCollectWorker # Refresh the branch cache in case garbage collection caused a ref lookup to fail flush_ref_caches(project) if task == :gc + project.repository.expire_statistics_caches + # In case pack files are deleted, release libgit2 cache and open file # descriptors ASAP instead of waiting for Ruby garbage collection project.cleanup diff --git a/app/workers/object_pool/create_worker.rb b/app/workers/object_pool/create_worker.rb new file mode 100644 index 00000000000..135b99886dc --- /dev/null +++ b/app/workers/object_pool/create_worker.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module ObjectPool + class CreateWorker + include ApplicationWorker + include ObjectPoolQueue + include ExclusiveLeaseGuard + + attr_reader :pool + + def perform(pool_id) + @pool = PoolRepository.find_by_id(pool_id) + return unless pool + + try_obtain_lease do + perform_pool_creation + end + end + + private + + def perform_pool_creation + return unless pool.failed? || pool.scheduled? + + # If this is a retry and the previous execution failed, deletion will + # bring the pool back to a pristine state + pool.delete_object_pool if pool.failed? + + pool.create_object_pool + pool.mark_ready + rescue => e + pool.mark_failed + raise e + end + + def lease_key + "object_pool:create:#{pool.id}" + end + + def lease_timeout + 1.hour + end + end +end diff --git a/app/workers/object_pool/join_worker.rb b/app/workers/object_pool/join_worker.rb new file mode 100644 index 00000000000..07676011b2a --- /dev/null +++ b/app/workers/object_pool/join_worker.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module ObjectPool + class JoinWorker + include ApplicationWorker + include ObjectPoolQueue + + def perform(pool_id, project_id) + pool = PoolRepository.find_by_id(pool_id) + return unless pool&.joinable? 
+ + project = Project.find_by_id(project_id) + return unless project + + pool.link_repository(project.repository) + + Projects::HousekeepingService.new(project).execute + end + end +end diff --git a/app/workers/object_pool/schedule_join_worker.rb b/app/workers/object_pool/schedule_join_worker.rb new file mode 100644 index 00000000000..647a8b72435 --- /dev/null +++ b/app/workers/object_pool/schedule_join_worker.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +module ObjectPool + class ScheduleJoinWorker + include ApplicationWorker + include ObjectPoolQueue + + def perform(pool_id) + pool = PoolRepository.find_by_id(pool_id) + return unless pool&.joinable? + + pool.member_projects.find_each do |project| + next if project.forked? && !project.import_finished? + + ObjectPool::JoinWorker.perform_async(pool.id, project.id) + end + end + end +end diff --git a/changelogs/unreleased/cert-manager-email.yml b/changelogs/unreleased/cert-manager-email.yml new file mode 100644 index 00000000000..530608d9660 --- /dev/null +++ b/changelogs/unreleased/cert-manager-email.yml @@ -0,0 +1,5 @@ +--- +title: Ability to override email for cert-manager +merge_request: 23503 +author: Amit Rathi +type: added diff --git a/changelogs/unreleased/zj-pool-repository-creation.yml b/changelogs/unreleased/zj-pool-repository-creation.yml new file mode 100644 index 00000000000..a24b96e4924 --- /dev/null +++ b/changelogs/unreleased/zj-pool-repository-creation.yml @@ -0,0 +1,5 @@ +--- +title: Allow public forks to be deduplicated +merge_request: 23508 +author: +type: added diff --git a/config/sidekiq_queues.yml b/config/sidekiq_queues.yml index 4782a223561..5985569bef4 100644 --- a/config/sidekiq_queues.yml +++ b/config/sidekiq_queues.yml @@ -81,5 +81,6 @@ - [delete_diff_files, 1] - [detect_repository_languages, 1] - [auto_devops, 2] + - [object_pool, 1] - [repository_cleanup, 1] - [delete_stored_files, 1] diff --git a/db/migrate/20181128123704_add_state_to_pool_repository.rb b/db/migrate/20181128123704_add_state_to_pool_repository.rb new file mode 100644 index 00000000000..714232ede56 --- /dev/null +++ b/db/migrate/20181128123704_add_state_to_pool_repository.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +class AddStateToPoolRepository < ActiveRecord::Migration[5.0] + include Gitlab::Database::MigrationHelpers + + DOWNTIME = false + + # Given the table is empty, and the non concurrent methods are chosen so + # the transactions don't have to be disabled + # rubocop: disable Migration/AddConcurrentForeignKey, Migration/AddIndex + def change + add_column(:pool_repositories, :state, :string, null: true) + + add_column :pool_repositories, :source_project_id, :integer + add_index :pool_repositories, :source_project_id, unique: true + add_foreign_key :pool_repositories, :projects, column: :source_project_id, on_delete: :nullify + end + # rubocop: enable Migration/AddConcurrentForeignKey, Migration/AddIndex +end diff --git a/db/schema.rb b/db/schema.rb index d7124100621..ff2dde3243c 100644 --- a/db/schema.rb +++ b/db/schema.rb @@ -1512,8 +1512,11 @@ ActiveRecord::Schema.define(version: 20181203002526) do create_table "pool_repositories", id: :bigserial, force: :cascade do |t| t.integer "shard_id", null: false t.string "disk_path" + t.string "state" + t.integer "source_project_id" t.index ["disk_path"], name: "index_pool_repositories_on_disk_path", unique: true, using: :btree t.index ["shard_id"], name: "index_pool_repositories_on_shard_id", using: :btree + t.index ["source_project_id"], name: 
"index_pool_repositories_on_source_project_id", unique: true, using: :btree end create_table "programming_languages", force: :cascade do |t| @@ -2393,6 +2396,7 @@ ActiveRecord::Schema.define(version: 20181203002526) do add_foreign_key "oauth_openid_requests", "oauth_access_grants", column: "access_grant_id", name: "fk_oauth_openid_requests_oauth_access_grants_access_grant_id" add_foreign_key "pages_domains", "projects", name: "fk_ea2f6dfc6f", on_delete: :cascade add_foreign_key "personal_access_tokens", "users" + add_foreign_key "pool_repositories", "projects", column: "source_project_id", on_delete: :nullify add_foreign_key "pool_repositories", "shards", on_delete: :restrict add_foreign_key "project_authorizations", "projects", on_delete: :cascade add_foreign_key "project_authorizations", "users", on_delete: :cascade diff --git a/doc/administration/repository_storage_types.md b/doc/administration/repository_storage_types.md index 9379944b250..12238ba7b32 100644 --- a/doc/administration/repository_storage_types.md +++ b/doc/administration/repository_storage_types.md @@ -94,6 +94,23 @@ need to be performed on these nodes as well. Database changes will propagate wit You must make sure the migration event was already processed or otherwise it may migrate the files back to Hashed state again. +#### Hashed object pools + +For deduplication of public forks and their parent repository, objects are pooled +in an object pool. These object pools are a third repository where shared objects +are stored. + +```ruby +# object pool paths +"@pools/#{hash[0..1]}/#{hash[2..3]}/#{hash}.git" +``` + +The object pool feature is behind the `object_pools` feature flag, and can be +enabled for individual projects by executing +`Feature.enable(:object_pools, Project.find(<id>))`. Note that the project has to +be on hashed storage, should not be a fork itself, and hashed storage should be +enabled for all new projects. + ##### Attachments To rollback single Attachment migration, rename `aa/bb/abcdef1234567890...` folder back to `namespace/project`. diff --git a/doc/user/project/clusters/index.md b/doc/user/project/clusters/index.md index 6d05e2feeec..e40525d2577 100644 --- a/doc/user/project/clusters/index.md +++ b/doc/user/project/clusters/index.md @@ -267,7 +267,7 @@ deployments. | ----------- | :------------: | ----------- | --------------- | | [Helm Tiller](https://docs.helm.sh/) | 10.2+ | Helm is a package manager for Kubernetes and is required to install all the other applications. It is installed in its own pod inside the cluster which can run the `helm` CLI in a safe environment. | n/a | | [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) | 10.2+ | Ingress can provide load balancing, SSL termination, and name-based virtual hosting. It acts as a web proxy for your applications and is useful if you want to use [Auto DevOps] or deploy your own web apps. | [stable/nginx-ingress](https://github.com/helm/charts/tree/master/stable/nginx-ingress) | -| [Cert Manager](http://docs.cert-manager.io/en/latest/) | 11.6+ | Cert Manager is a native Kubernetes certificate management controller that helps with issuing certificates. Installing Cert Manager on your cluster will issue a certificate by [Let's Encrypt](https://letsencrypt.org/) and ensure that certificates are valid and up to date. The email address used by Let's Encrypt registration will be taken from the GitLab user that installed Cert Manager on the cluster. 
| [stable/cert-manager](https://github.com/helm/charts/tree/master/stable/cert-manager) | +| [Cert Manager](http://docs.cert-manager.io/en/latest/) | 11.6+ | Cert Manager is a native Kubernetes certificate management controller that helps with issuing certificates. Installing Cert Manager on your cluster will issue a certificate by [Let's Encrypt](https://letsencrypt.org/) and ensure that certificates are valid and up-to-date. | [stable/cert-manager](https://github.com/helm/charts/tree/master/stable/cert-manager) | | [Prometheus](https://prometheus.io/docs/introduction/overview/) | 10.4+ | Prometheus is an open-source monitoring and alerting system useful to supervise your deployed applications. | [stable/prometheus](https://github.com/helm/charts/tree/master/stable/prometheus) | | [GitLab Runner](https://docs.gitlab.com/runner/) | 10.6+ | GitLab Runner is the open source project that is used to run your jobs and send the results back to GitLab. It is used in conjunction with [GitLab CI/CD](https://about.gitlab.com/features/gitlab-ci-cd/), the open-source continuous integration service included with GitLab that coordinates the jobs. When installing the GitLab Runner via the applications, it will run in **privileged mode** by default. Make sure you read the [security implications](#security-implications) before doing so. | [runner/gitlab-runner](https://gitlab.com/charts/gitlab-runner) | | [JupyterHub](http://jupyter.org/) | 11.0+ | [JupyterHub](https://jupyterhub.readthedocs.io/en/stable/) is a multi-user service for managing notebooks across a team. [Jupyter Notebooks](https://jupyter-notebook.readthedocs.io/en/latest/) provide a web-based interactive programming environment used for data analysis, visualization, and machine learning. We use [this](https://gitlab.com/gitlab-org/jupyterhub-user-image/blob/master/Dockerfile) custom Jupyter image that installs additional useful packages on top of the base Jupyter. You will also see ready-to-use DevOps Runbooks built with Nurtch's [Rubix library](https://github.com/amit1rrr/rubix). More information on creating executable runbooks can be found at [Nurtch Documentation](http://docs.nurtch.com/en/latest). **Note**: Authentication will be enabled for any user of the GitLab server via OAuth2. HTTPS will be supported in a future release. | [jupyter/jupyterhub](https://jupyterhub.github.io/helm-chart/) | diff --git a/lib/banzai/filter/milestone_reference_filter.rb b/lib/banzai/filter/milestone_reference_filter.rb index 328c8c1803b..c70c3f0c04e 100644 --- a/lib/banzai/filter/milestone_reference_filter.rb +++ b/lib/banzai/filter/milestone_reference_filter.rb @@ -4,6 +4,8 @@ module Banzai module Filter # HTML filter that replaces milestone references with links. class MilestoneReferenceFilter < AbstractReferenceFilter + include Gitlab::Utils::StrongMemoize + self.reference_type = :milestone def self.object_class @@ -13,16 +15,34 @@ module Banzai # Links to project milestones contain the IID, but when we're handling # 'regular' references, we need to use the global ID to disambiguate # between group and project milestones. 
- def find_object(project, id) - return unless project.is_a?(Project) + def find_object(parent, id) + return unless valid_context?(parent) - find_milestone_with_finder(project, id: id) + find_milestone_with_finder(parent, id: id) end - def find_object_from_link(project, iid) - return unless project.is_a?(Project) + def find_object_from_link(parent, iid) + return unless valid_context?(parent) + + find_milestone_with_finder(parent, iid: iid) + end + + def valid_context?(parent) + strong_memoize(:valid_context) do + group_context?(parent) || project_context?(parent) + end + end + + def group_context?(parent) + strong_memoize(:group_context) do + parent.is_a?(Group) + end + end - find_milestone_with_finder(project, iid: iid) + def project_context?(parent) + strong_memoize(:project_context) do + parent.is_a?(Project) + end end def references_in(text, pattern = Milestone.reference_pattern) @@ -44,13 +64,15 @@ module Banzai def find_milestone(project_ref, namespace_ref, milestone_id, milestone_name) project_path = full_project_path(namespace_ref, project_ref) - project = parent_from_ref(project_path) - return unless project && project.is_a?(Project) + # Returns group if project is not found by path + parent = parent_from_ref(project_path) + + return unless parent milestone_params = milestone_params(milestone_id, milestone_name) - find_milestone_with_finder(project, milestone_params) + find_milestone_with_finder(parent, milestone_params) end def milestone_params(iid, name) @@ -61,16 +83,28 @@ module Banzai end end - def find_milestone_with_finder(project, params) - finder_params = { project_ids: [project.id], order: nil, state: 'all' } + def find_milestone_with_finder(parent, params) + finder_params = milestone_finder_params(parent, params[:iid].present?) + + MilestonesFinder.new(finder_params).find_by(params) + end - # We don't support IID lookups for group milestones, because IIDs can - # clash between group and project milestones. - if project.group && !params[:iid] - finder_params[:group_ids] = project.group.self_and_ancestors_ids + def milestone_finder_params(parent, find_by_iid) + { order: nil, state: 'all' }.tap do |params| + params[:project_ids] = parent.id if project_context?(parent) + + # We don't support IID lookups because IIDs can clash between + # group/project milestones and group/subgroup milestones. + params[:group_ids] = self_and_ancestors_ids(parent) unless find_by_iid end + end - MilestonesFinder.new(finder_params).find_by(params) + def self_and_ancestors_ids(parent) + if group_context?(parent) + parent.self_and_ancestors_ids + elsif project_context?(parent) + parent.group&.self_and_ancestors_ids + end end def url_for_object(milestone, project) diff --git a/lib/gitlab/git/object_pool.rb b/lib/gitlab/git/object_pool.rb new file mode 100644 index 00000000000..558699a6318 --- /dev/null +++ b/lib/gitlab/git/object_pool.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +module Gitlab + module Git + class ObjectPool + # GL_REPOSITORY has to be passed for Gitlab::Git::Repositories, but not + # used for ObjectPools. 
+ GL_REPOSITORY = "" + + delegate :exists?, :size, to: :repository + delegate :delete, to: :object_pool_service + + attr_reader :storage, :relative_path, :source_repository + + def initialize(storage, relative_path, source_repository) + @storage = storage + @relative_path = relative_path + @source_repository = source_repository + end + + def create + object_pool_service.create(source_repository) + end + + def link(to_link_repo) + remote_name = to_link_repo.object_pool_remote_name + repository.set_config( + "remote.#{remote_name}.url" => relative_path_to(to_link_repo.relative_path), + "remote.#{remote_name}.tagOpt" => "--no-tags", + "remote.#{remote_name}.fetch" => "+refs/*:refs/remotes/#{remote_name}/*" + ) + + object_pool_service.link_repository(to_link_repo) + end + + def gitaly_object_pool + Gitaly::ObjectPool.new(repository: to_gitaly_repository) + end + + def to_gitaly_repository + Gitlab::GitalyClient::Util.repository(storage, relative_path, GL_REPOSITORY) + end + + # Allows for reusing other RPCs by 'tricking' Gitaly to think its a repository + def repository + @repository ||= Gitlab::Git::Repository.new(storage, relative_path, GL_REPOSITORY) + end + + private + + def object_pool_service + @object_pool_service ||= Gitlab::GitalyClient::ObjectPoolService.new(self) + end + + def relative_path_to(pool_member_path) + pool_path = Pathname.new("#{relative_path}#{File::SEPARATOR}") + + Pathname.new(pool_member_path).relative_path_from(pool_path).to_s + end + end + end +end diff --git a/lib/gitlab/git/repository.rb b/lib/gitlab/git/repository.rb index 0a541031884..5bbedc9d5e3 100644 --- a/lib/gitlab/git/repository.rb +++ b/lib/gitlab/git/repository.rb @@ -69,6 +69,13 @@ module Gitlab attr_reader :storage, :gl_repository, :relative_path + # This remote name has to be stable for all types of repositories that + # can join an object pool. If it's structure ever changes, a migration + # has to be performed on the object pools to update the remote names. + # Else the pool can't be updated anymore and is left in an inconsistent + # state. + alias_method :object_pool_remote_name, :gl_repository + # This initializer method is only used on the client side (gitlab-ce). # Gitaly-ruby uses a different initializer. 
def initialize(storage, relative_path, gl_repository) diff --git a/lib/gitlab/gitaly_client/object_pool_service.rb b/lib/gitlab/gitaly_client/object_pool_service.rb new file mode 100644 index 00000000000..272ce73ad64 --- /dev/null +++ b/lib/gitlab/gitaly_client/object_pool_service.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module Gitlab + module GitalyClient + class ObjectPoolService + attr_reader :object_pool, :storage + + def initialize(object_pool) + @object_pool = object_pool.gitaly_object_pool + @storage = object_pool.storage + end + + def create(repository) + request = Gitaly::CreateObjectPoolRequest.new( + object_pool: object_pool, + origin: repository.gitaly_repository) + + GitalyClient.call(storage, :object_pool_service, :create_object_pool, request) + end + + def delete + request = Gitaly::DeleteObjectPoolRequest.new(object_pool: object_pool) + + GitalyClient.call(storage, :object_pool_service, :delete_object_pool, request) + end + + def link_repository(repository) + request = Gitaly::LinkRepositoryToObjectPoolRequest.new( + object_pool: object_pool, + repository: repository.gitaly_repository + ) + + GitalyClient.call(storage, :object_pool_service, :link_repository_to_object_pool, + request, timeout: GitalyClient.fast_timeout) + end + + def unlink_repository(repository) + request = Gitaly::UnlinkRepositoryFromObjectPoolRequest.new(repository: repository.gitaly_repository) + + GitalyClient.call(storage, :object_pool_service, :unlink_repository_from_object_pool, + request, timeout: GitalyClient.fast_timeout) + end + end + end +end diff --git a/locale/gitlab.pot b/locale/gitlab.pot index d7d03151cdf..2aeb015ed09 100644 --- a/locale/gitlab.pot +++ b/locale/gitlab.pot @@ -1473,6 +1473,9 @@ msgstr "" msgid "ClusterIntegration|Cert-Manager" msgstr "" +msgid "ClusterIntegration|Cert-Manager is a native Kubernetes certificate management controller that helps with issuing certificates. Installing Cert-Manager on your cluster will issue a certificate by %{letsEncrypt} and ensure that certificates are valid and up-to-date." +msgstr "" + msgid "ClusterIntegration|Certificate Authority bundle (PEM format)" msgstr "" @@ -1587,6 +1590,12 @@ msgstr "" msgid "ClusterIntegration|Integration status" msgstr "" +msgid "ClusterIntegration|Issuer Email" +msgstr "" + +msgid "ClusterIntegration|Issuers represent a certificate authority. You must provide an email address for your Issuer. " +msgstr "" + msgid "ClusterIntegration|Jupyter Hostname" msgstr "" @@ -1806,9 +1815,6 @@ msgstr "" msgid "ClusterIntegration|access to Google Kubernetes Engine" msgstr "" -msgid "ClusterIntegration|cert-manager is a native Kubernetes certificate management controller that helps with issuing certificates. Installing cert-manager on your cluster will issue a certificate by %{letsEncrypt} and ensure that certificates are valid and up to date." 
-msgstr "" - msgid "ClusterIntegration|check the pricing here" msgstr "" diff --git a/spec/controllers/projects_controller_spec.rb b/spec/controllers/projects_controller_spec.rb index 7849bec4762..576191a5788 100644 --- a/spec/controllers/projects_controller_spec.rb +++ b/spec/controllers/projects_controller_spec.rb @@ -279,7 +279,7 @@ describe ProjectsController do expected_query = /#{public_project.fork_network.find_forks_in(other_user.namespace).to_sql}/ expect { get(:show, namespace_id: public_project.namespace, id: public_project) } - .not_to exceed_query_limit(1).for_query(expected_query) + .not_to exceed_query_limit(2).for_query(expected_query) end end end diff --git a/spec/factories/pool_repositories.rb b/spec/factories/pool_repositories.rb index 2ed0844ed47..265a4643f46 100644 --- a/spec/factories/pool_repositories.rb +++ b/spec/factories/pool_repositories.rb @@ -1,5 +1,26 @@ FactoryBot.define do factory :pool_repository do - shard + shard { Shard.by_name("default") } + state :none + + before(:create) do |pool| + pool.source_project = create(:project, :repository) + end + + trait :scheduled do + state :scheduled + end + + trait :failed do + state :failed + end + + trait :ready do + state :ready + + after(:create) do |pool| + pool.create_object_pool + end + end end end diff --git a/spec/features/projects/clusters/applications_spec.rb b/spec/features/projects/clusters/applications_spec.rb index 71d715237f5..8918a7b7b9c 100644 --- a/spec/features/projects/clusters/applications_spec.rb +++ b/spec/features/projects/clusters/applications_spec.rb @@ -70,6 +70,44 @@ describe 'Clusters Applications', :js do end end + context 'when user installs Cert Manager' do + before do + allow(ClusterInstallAppWorker).to receive(:perform_async) + allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_in) + allow(ClusterWaitForIngressIpAddressWorker).to receive(:perform_async) + + create(:clusters_applications_helm, :installed, cluster: cluster) + + page.within('.js-cluster-application-row-cert_manager') do + click_button 'Install' + end + end + + it 'shows status transition' do + def email_form_value + page.find('.js-email').value + end + + page.within('.js-cluster-application-row-cert_manager') do + expect(email_form_value).to eq(cluster.user.email) + expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Install') + + page.find('.js-email').set("new_email@example.org") + Clusters::Cluster.last.application_cert_manager.make_installing! + + expect(email_form_value).to eq('new_email@example.org') + expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Installing') + + Clusters::Cluster.last.application_cert_manager.make_installed! 
+ + expect(email_form_value).to eq('new_email@example.org') + expect(page).to have_css('.js-cluster-application-install-button', exact_text: 'Installed') + end + + expect(page).to have_content('Cert-Manager was successfully installed on your Kubernetes cluster') + end + end + context 'when user installs Ingress' do context 'when user installs application: Ingress' do before do diff --git a/spec/fixtures/api/schemas/cluster_status.json b/spec/fixtures/api/schemas/cluster_status.json index ccef17a6615..3d9e0628f63 100644 --- a/spec/fixtures/api/schemas/cluster_status.json +++ b/spec/fixtures/api/schemas/cluster_status.json @@ -32,7 +32,8 @@ }, "status_reason": { "type": ["string", "null"] }, "external_ip": { "type": ["string", "null"] }, - "hostname": { "type": ["string", "null"] } + "hostname": { "type": ["string", "null"] }, + "email": { "type": ["string", "null"] } }, "required" : [ "name", "status" ] } diff --git a/spec/javascripts/clusters/components/applications_spec.js b/spec/javascripts/clusters/components/applications_spec.js index e46edec9abb..14ef1193984 100644 --- a/spec/javascripts/clusters/components/applications_spec.js +++ b/spec/javascripts/clusters/components/applications_spec.js @@ -176,6 +176,54 @@ describe('Applications', () => { }); }); + describe('Cert-Manager application', () => { + describe('when not installed', () => { + it('renders email & allows editing', () => { + vm = mountComponent(Applications, { + applications: { + helm: { title: 'Helm Tiller', status: 'installed' }, + ingress: { title: 'Ingress', status: 'installed', externalIp: '1.1.1.1' }, + cert_manager: { + title: 'Cert-Manager', + email: 'before@example.com', + status: 'installable', + }, + runner: { title: 'GitLab Runner' }, + prometheus: { title: 'Prometheus' }, + jupyter: { title: 'JupyterHub', hostname: '', status: 'installable' }, + knative: { title: 'Knative', hostname: '', status: 'installable' }, + }, + }); + + expect(vm.$el.querySelector('.js-email').value).toEqual('before@example.com'); + expect(vm.$el.querySelector('.js-email').getAttribute('readonly')).toBe(null); + }); + }); + + describe('when installed', () => { + it('renders email in readonly', () => { + vm = mountComponent(Applications, { + applications: { + helm: { title: 'Helm Tiller', status: 'installed' }, + ingress: { title: 'Ingress', status: 'installed', externalIp: '1.1.1.1' }, + cert_manager: { + title: 'Cert-Manager', + email: 'after@example.com', + status: 'installed', + }, + runner: { title: 'GitLab Runner' }, + prometheus: { title: 'Prometheus' }, + jupyter: { title: 'JupyterHub', hostname: '', status: 'installable' }, + knative: { title: 'Knative', hostname: '', status: 'installable' }, + }, + }); + + expect(vm.$el.querySelector('.js-email').value).toEqual('after@example.com'); + expect(vm.$el.querySelector('.js-email').getAttribute('readonly')).toEqual('readonly'); + }); + }); + }); + describe('Jupyter application', () => { describe('with ingress installed with ip & jupyter installable', () => { it('renders hostname active input', () => { diff --git a/spec/javascripts/clusters/services/mock_data.js b/spec/javascripts/clusters/services/mock_data.js index 540d7f30858..3c3d9977ffb 100644 --- a/spec/javascripts/clusters/services/mock_data.js +++ b/spec/javascripts/clusters/services/mock_data.js @@ -42,6 +42,7 @@ const CLUSTERS_MOCK_DATA = { name: 'cert_manager', status: APPLICATION_STATUS.ERROR, status_reason: 'Cannot connect', + email: 'test@example.com', }, ], }, @@ -86,6 +87,7 @@ const CLUSTERS_MOCK_DATA = { name: 
'cert_manager', status: APPLICATION_STATUS.ERROR, status_reason: 'Cannot connect', + email: 'test@example.com', }, ], }, diff --git a/spec/javascripts/clusters/stores/clusters_store_spec.js b/spec/javascripts/clusters/stores/clusters_store_spec.js index 7ea0878ad45..1ca55549094 100644 --- a/spec/javascripts/clusters/stores/clusters_store_spec.js +++ b/spec/javascripts/clusters/stores/clusters_store_spec.js @@ -115,6 +115,7 @@ describe('Clusters Store', () => { statusReason: mockResponseData.applications[6].status_reason, requestStatus: null, requestReason: null, + email: mockResponseData.applications[6].email, }, }, }); diff --git a/spec/lib/banzai/filter/milestone_reference_filter_spec.rb b/spec/lib/banzai/filter/milestone_reference_filter_spec.rb index 91d4a60ba95..1a87cfa5b45 100644 --- a/spec/lib/banzai/filter/milestone_reference_filter_spec.rb +++ b/spec/lib/banzai/filter/milestone_reference_filter_spec.rb @@ -351,21 +351,50 @@ describe Banzai::Filter::MilestoneReferenceFilter do end context 'group context' do - let(:context) { { project: nil, group: create(:group) } } - let(:milestone) { create(:milestone, project: project) } + let(:group) { create(:group) } + let(:context) { { project: nil, group: group } } - it 'links to a valid reference' do - reference = "#{project.full_path}%#{milestone.iid}" + context 'when project milestone' do + let(:milestone) { create(:milestone, project: project) } - result = reference_filter("See #{reference}", context) + it 'links to a valid reference' do + reference = "#{project.full_path}%#{milestone.iid}" - expect(result.css('a').first.attr('href')).to eq(urls.milestone_url(milestone)) + result = reference_filter("See #{reference}", context) + + expect(result.css('a').first.attr('href')).to eq(urls.milestone_url(milestone)) + end + + it 'ignores internal references' do + exp = act = "See %#{milestone.iid}" + + expect(reference_filter(act, context).to_html).to eq exp + end end - it 'ignores internal references' do - exp = act = "See %#{milestone.iid}" + context 'when group milestone' do + let(:group_milestone) { create(:milestone, title: 'group_milestone', group: group) } - expect(reference_filter(act, context).to_html).to eq exp + context 'for subgroups', :nested_groups do + let(:sub_group) { create(:group, parent: group) } + let(:sub_group_milestone) { create(:milestone, title: 'sub_group_milestone', group: sub_group) } + + it 'links to a valid reference of subgroup and group milestones' do + [group_milestone, sub_group_milestone].each do |milestone| + reference = "%#{milestone.title}" + + result = reference_filter("See #{reference}", { project: nil, group: sub_group }) + + expect(result.css('a').first.attr('href')).to eq(urls.milestone_url(milestone)) + end + end + end + + it 'ignores internal references' do + exp = act = "See %#{group_milestone.iid}" + + expect(reference_filter(act, context).to_html).to eq exp + end end end diff --git a/spec/lib/gitlab/git/object_pool_spec.rb b/spec/lib/gitlab/git/object_pool_spec.rb new file mode 100644 index 00000000000..363c2aa67af --- /dev/null +++ b/spec/lib/gitlab/git/object_pool_spec.rb @@ -0,0 +1,89 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Gitlab::Git::ObjectPool do + let(:pool_repository) { create(:pool_repository) } + let(:source_repository) { pool_repository.source_project.repository } + + subject { pool_repository.object_pool } + + describe '#storage' do + it "equals the pool repository's shard name" do + expect(subject.storage).not_to be_nil + expect(subject.storage).to 
eq(pool_repository.shard_name) + end + end + + describe '#create' do + before do + subject.create + end + + context "when the pool doesn't exist yet" do + it 'creates the pool' do + expect(subject.exists?).to be(true) + end + end + + context 'when the pool already exists' do + it 'raises an FailedPrecondition' do + expect do + subject.create + end.to raise_error(GRPC::FailedPrecondition) + end + end + end + + describe '#exists?' do + context "when the object pool doesn't exist" do + it 'returns false' do + expect(subject.exists?).to be(false) + end + end + + context 'when the object pool exists' do + let(:pool) { create(:pool_repository, :ready) } + + subject { pool.object_pool } + + it 'returns true' do + expect(subject.exists?).to be(true) + end + end + end + + describe '#link' do + let!(:pool_repository) { create(:pool_repository, :ready) } + + context 'when no remotes are set' do + it 'sets a remote' do + subject.link(source_repository) + + repo = Gitlab::GitalyClient::StorageSettings.allow_disk_access do + Rugged::Repository.new(subject.repository.path) + end + + expect(repo.remotes.count).to be(1) + expect(repo.remotes.first.name).to eq(source_repository.object_pool_remote_name) + end + end + + context 'when the remote is already set' do + before do + subject.link(source_repository) + end + + it "doesn't raise an error" do + subject.link(source_repository) + + repo = Gitlab::GitalyClient::StorageSettings.allow_disk_access do + Rugged::Repository.new(subject.repository.path) + end + + expect(repo.remotes.count).to be(1) + expect(repo.remotes.first.name).to eq(source_repository.object_pool_remote_name) + end + end + end +end diff --git a/spec/lib/gitlab/gitaly_client/object_pool_service_spec.rb b/spec/lib/gitlab/gitaly_client/object_pool_service_spec.rb new file mode 100644 index 00000000000..149b7ec5bb0 --- /dev/null +++ b/spec/lib/gitlab/gitaly_client/object_pool_service_spec.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Gitlab::GitalyClient::ObjectPoolService do + let(:pool_repository) { create(:pool_repository) } + let(:project) { create(:project, :repository) } + let(:raw_repository) { project.repository.raw } + let(:object_pool) { pool_repository.object_pool } + + subject { described_class.new(object_pool) } + + before do + subject.create(raw_repository) + end + + describe '#create' do + it 'exists on disk' do + expect(object_pool.repository.exists?).to be(true) + end + + context 'when the pool already exists' do + it 'returns an error' do + expect do + subject.create(raw_repository) + end.to raise_error(GRPC::FailedPrecondition) + end + end + end + + describe '#delete' do + it 'removes the repository from disk' do + subject.delete + + expect(object_pool.repository.exists?).to be(false) + end + + context 'when called twice' do + it "doesn't raise an error" do + subject.delete + + expect { object_pool.delete }.not_to raise_error + end + end + end +end diff --git a/spec/models/pool_repository_spec.rb b/spec/models/pool_repository_spec.rb index 541e78507e5..3d3878b8c39 100644 --- a/spec/models/pool_repository_spec.rb +++ b/spec/models/pool_repository_spec.rb @@ -5,6 +5,7 @@ require 'spec_helper' describe PoolRepository do describe 'associations' do it { is_expected.to belong_to(:shard) } + it { is_expected.to have_one(:source_project) } it { is_expected.to have_many(:member_projects) } end @@ -12,15 +13,14 @@ describe PoolRepository do let!(:pool_repository) { create(:pool_repository) } it { is_expected.to validate_presence_of(:shard) } + it 
{ is_expected.to validate_presence_of(:source_project) } end describe '#disk_path' do it 'sets the hashed disk_path' do pool = create(:pool_repository) - elements = File.split(pool.disk_path) - - expect(elements).to all( match(/\d{2,}/) ) + expect(pool.disk_path).to match(%r{\A@pools/\h{2}/\h{2}/\h{64}}) end end end diff --git a/spec/models/project_spec.rb b/spec/models/project_spec.rb index 93c83fd21fd..1c85411dc3b 100644 --- a/spec/models/project_spec.rb +++ b/spec/models/project_spec.rb @@ -4092,6 +4092,44 @@ describe Project do end end + describe '#git_objects_poolable?' do + subject { project } + + context 'when the feature flag is turned off' do + before do + stub_feature_flags(object_pools: false) + end + + let(:project) { create(:project, :repository, :public) } + + it { is_expected.not_to be_git_objects_poolable } + end + + context 'when the feature flag is enabled' do + context 'when not using hashed storage' do + let(:project) { create(:project, :legacy_storage, :public, :repository) } + + it { is_expected.not_to be_git_objects_poolable } + end + + context 'when the project is not public' do + let(:project) { create(:project, :private) } + + it { is_expected.not_to be_git_objects_poolable } + end + + context 'when objects are poolable' do + let(:project) { create(:project, :repository, :public) } + + before do + stub_application_setting(hashed_storage_enabled: true) + end + + it { is_expected.to be_git_objects_poolable } + end + end + end + def rugged_config rugged_repo(project.repository).config end diff --git a/spec/services/clusters/applications/create_service_spec.rb b/spec/services/clusters/applications/create_service_spec.rb index 0bd7719345e..1a2ca23748a 100644 --- a/spec/services/clusters/applications/create_service_spec.rb +++ b/spec/services/clusters/applications/create_service_spec.rb @@ -31,6 +31,31 @@ describe Clusters::Applications::CreateService do subject end + context 'cert manager application' do + let(:params) do + { + application: 'cert_manager', + email: 'test@example.com' + } + end + + before do + allow_any_instance_of(Clusters::Applications::ScheduleInstallationService).to receive(:execute) + end + + it 'creates the application' do + expect do + subject + + cluster.reload + end.to change(cluster, :application_cert_manager) + end + + it 'sets the email' do + expect(subject.email).to eq('test@example.com') + end + end + context 'jupyter application' do let(:params) do { diff --git a/spec/services/projects/fork_service_spec.rb b/spec/services/projects/fork_service_spec.rb index a3d24ae312a..26e8d829345 100644 --- a/spec/services/projects/fork_service_spec.rb +++ b/spec/services/projects/fork_service_spec.rb @@ -2,7 +2,8 @@ require 'spec_helper' describe Projects::ForkService do include ProjectForksHelper - let(:gitlab_shell) { Gitlab::Shell.new } + include Gitlab::ShellAdapter + context 'when forking a new project' do describe 'fork by user' do before do @@ -235,6 +236,33 @@ describe Projects::ForkService do end end + context 'when forking with object pools' do + let(:fork_from_project) { create(:project, :public) } + let(:forker) { create(:user) } + + before do + stub_feature_flags(object_pools: true) + end + + context 'when no pool exists' do + it 'creates a new object pool' do + forked_project = fork_project(fork_from_project, forker) + + expect(forked_project.pool_repository).to eq(fork_from_project.pool_repository) + end + end + + context 'when a pool already exists' do + let!(:pool_repository) { create(:pool_repository, source_project: 
fork_from_project) } + + it 'joins the object pool' do + forked_project = fork_project(fork_from_project, forker) + + expect(forked_project.pool_repository).to eq(fork_from_project.pool_repository) + end + end + end + context 'when linking fork to an existing project' do let(:fork_from_project) { create(:project, :public) } let(:fork_to_project) { create(:project, :public) } diff --git a/spec/workers/object_pool/create_worker_spec.rb b/spec/workers/object_pool/create_worker_spec.rb new file mode 100644 index 00000000000..06416489472 --- /dev/null +++ b/spec/workers/object_pool/create_worker_spec.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe ObjectPool::CreateWorker do + let(:pool) { create(:pool_repository, :scheduled) } + + subject { described_class.new } + + describe '#perform' do + context 'when the pool creation is successful' do + it 'marks the pool as ready' do + subject.perform(pool.id) + + expect(pool.reload).to be_ready + end + end + + context 'when a the pool already exists' do + before do + pool.create_object_pool + end + + it 'cleans up the pool' do + expect do + subject.perform(pool.id) + end.to raise_error(GRPC::FailedPrecondition) + + expect(pool.reload.failed?).to be(true) + end + end + + context 'when the server raises an unknown error' do + before do + allow_any_instance_of(PoolRepository).to receive(:create_object_pool).and_raise(GRPC::Internal) + end + + it 'marks the pool as failed' do + expect do + subject.perform(pool.id) + end.to raise_error(GRPC::Internal) + + expect(pool.reload.failed?).to be(true) + end + end + + context 'when the pool creation failed before' do + let(:pool) { create(:pool_repository, :failed) } + + it 'deletes the pool first' do + expect_any_instance_of(PoolRepository).to receive(:delete_object_pool) + + subject.perform(pool.id) + + expect(pool.reload).to be_ready + end + end + end +end diff --git a/spec/workers/object_pool/join_worker_spec.rb b/spec/workers/object_pool/join_worker_spec.rb new file mode 100644 index 00000000000..906bc22c8d2 --- /dev/null +++ b/spec/workers/object_pool/join_worker_spec.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe ObjectPool::JoinWorker do + let(:pool) { create(:pool_repository, :ready) } + let(:project) { pool.source_project } + let(:repository) { project.repository } + + subject { described_class.new } + + describe '#perform' do + context "when the pool is not joinable" do + let(:pool) { create(:pool_repository, :scheduled) } + + it "doesn't raise an error" do + expect do + subject.perform(pool.id, project.id) + end.not_to raise_error + end + end + + context 'when the pool has been joined before' do + before do + pool.link_repository(repository) + end + + it 'succeeds in joining' do + expect do + subject.perform(pool.id, project.id) + end.not_to raise_error + end + end + end +end |
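Taken together, these changes gate deduplication behind the `object_pools` feature flag and drive pool creation through a small state machine on `PoolRepository` (`none → scheduled → ready`, with `failed` on error). The following is a minimal sketch of exercising the flow from a Rails console, assuming hashed storage is enabled instance-wide and the project ID is a placeholder:

```ruby
# Enable the flag for one source project (flag name comes from the docs change above).
project = Project.find(42)                 # placeholder ID
Feature.enable(:object_pools, project)

# Forking the project (e.g. via Projects::ForkService) calls object_pool_params,
# which lazily creates the pool record and schedules ObjectPool::CreateWorker.
pool = project.reload.pool_repository
pool.state          # => "scheduled" immediately after creation
pool.reload.state   # => "ready" once the worker has created the Gitaly object pool
pool.joinable?      # => true only while the pool is in the :ready state
```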