diff options
author | GitLab Bot <gitlab-bot@gitlab.com> | 2021-06-24 10:31:56 +0000 |
---|---|---|
committer | GitLab Bot <gitlab-bot@gitlab.com> | 2021-06-24 10:31:56 +0000 |
commit | 159f25da0106c574f2c855b44d5ba4e46822d3a3 (patch) | |
tree | 0c0c451079f5a737e3a45461473f45fb5f845921 | |
parent | f1926d2aa6447173a06fee5e0a3141bea27a0d8d (diff) | |
download | gitlab-ce-159f25da0106c574f2c855b44d5ba4e46822d3a3.tar.gz |
Add latest changes from gitlab-org/gitlab@14-0-stable-ee
52 files changed, 708 insertions, 288 deletions
diff --git a/GITLAB_KAS_VERSION b/GITLAB_KAS_VERSION index 4b964e96540..63dba868a0c 100644 --- a/GITLAB_KAS_VERSION +++ b/GITLAB_KAS_VERSION @@ -1 +1 @@ -14.0.0 +14.0.1 diff --git a/config/feature_flags/development/codequality_mr_diff.yml b/config/feature_flags/development/codequality_mr_diff.yml index fe7ad5a8b4f..ca6846b9390 100644 --- a/config/feature_flags/development/codequality_mr_diff.yml +++ b/config/feature_flags/development/codequality_mr_diff.yml @@ -5,4 +5,4 @@ rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/284140 milestone: '13.7' type: development group: group::testing -default_enabled: true +default_enabled: false diff --git a/config/feature_flags/development/codequality_mr_diff_annotations.yml b/config/feature_flags/development/codequality_mr_diff_annotations.yml index 28e9777f3a8..35fdc8acff8 100644 --- a/config/feature_flags/development/codequality_mr_diff_annotations.yml +++ b/config/feature_flags/development/codequality_mr_diff_annotations.yml @@ -5,4 +5,4 @@ rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/330909 milestone: '14.0' type: development group: group::testing -default_enabled: true +default_enabled: false diff --git a/data/whats_new/202106220001_14_0.yml b/data/whats_new/202106220001_14_0.yml new file mode 100644 index 00000000000..fb3775239d4 --- /dev/null +++ b/data/whats_new/202106220001_14_0.yml @@ -0,0 +1,161 @@ +- title: Streamlined top navigation menu + body: | + GitLab 14.0 introduces an all-new, streamlined top navigation menu to help you get where you're going faster and with fewer clicks. This new, consolidated menu offers the combined functionality of the previous Projects, Groups, and More menus. It gives you access to your projects, groups, and instance-level features with a single click. Additionally, all-new responsive views improve the navigation experience on smaller screens. 
+ stage: Create + self-managed: true + gitlab-com: true + packages: [Free, Premium, Ultimate] + url: https://gitlab.com/gitlab-org/gitlab/-/issues/332635 + image_url: https://about.gitlab.com/images/14_0/consolidated-top-nav.png + published_at: 2021-06-22 + release: 14.0 +- title: Sidebar navigation redesign + body: | + GitLab is big. And it's getting bigger. As we've introduced new features and categories, navigating the densely-packed left sidebar has become less intuitive. + + In GitLab 14.0 we've redesigned and restructured the left sidebar for improved usability, consistency, and discoverability. We've moved some links to features around, split up features in the **Operations** menu into three distinct menus, improved visual contrast, and optimized spacing so all the menu items can fit comfortably on a smaller screen. These changes are intended to better match your mental model of the DevOps lifecycle, and provide a more predictable and consistent experience while navigating within your projects and groups. + stage: Create + self-managed: true + gitlab-com: true + packages: [Free, Premium, Ultimate] + url: https://gitlab.com/gitlab-org/gitlab/-/issues/332635 + image_url: https://about.gitlab.com/images/14_0/redesigned-left-sidebar.png + published_at: 2021-06-22 + release: 14.0 +- title: Merge request reviews in VS Code + body: | + As a developer, you often spend a majority of your time working in your local development environment. When you're assigned a merge request for review, this requires you to leave your editor and perform that review inside of GitLab. While performing your review inside GitLab, you might also need to use your local editor to gain more context on the proposed changes. + + [GitLab Workflow](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) version `3.21.0` for Visual Studio Code (VS Code) now supports the complete merge request review process, including threads. 
Select the GitLab icon in VS Code to open the [sidebar](https://gitlab.com/gitlab-org/gitlab-vscode-extension#sidebar-details) to display **Merge requests I'm reviewing**. Select a merge request overview to view the complete details and discussions of the merge request. + + The sidebar also contains a list of all the changed files in the merge request. Selecting files opens a diff comparison for you to review the changes in VS Code. While viewing the diff, you can read feedback left on the files, and create new comments by selecting a line number and creating your comment. All comments and feedback you provide in VS Code are available in the GitLab web interface, making it easy for you to perform your reviews in VS Code, and other users to participate in GitLab. + + We're really excited about bringing the complete merge request review process to you inside of VS Code. Let us know what you think by [opening an issue](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/new?issue%5Bmilestone_id%5D=) for GitLab Workflow. + stage: Create + self-managed: true + gitlab-com: true + packages: [Free, Premium, Ultimate] + url: https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/main/README.md + image_url: https://img.youtube.com/vi/F5ypjlOZ4-0/hqdefault.jpg + published_at: 2021-06-22 + release: 14.0 +- title: Track usage of Code Owners + body: | + Code Owners are an important piece of the code review process in GitLab. When code owners are clearly identified, contributors can see who should review contributions to a file or repository. The Code Owners feature can also be used to establish a merge request approval process. Now, you can track which teams across your organization are using the Code Owners feature in their development workflow. 
+ + If you would like to drive adoption of Code Owners, sort the DevOps Adoption table by the Code Owners column to find teams that haven't yet adopted the feature so you can easily identify which teams need help getting started. Alternatively, find teams that have successfully configured Code Owners and get tips and feedback. The DevOps Adoption table is available at [the group level](https://docs.gitlab.com/ee/user/group/devops_adoption/) and [the instance level](https://docs.gitlab.com/ee/user/admin_area/analytics/dev_ops_report.html#devops-adoption). + stage: Manage + self-managed: true + gitlab-com: true + packages: [Ultimate] + url: https://docs.gitlab.com/ee/user/admin_area/analytics/dev_ops_report#devops-adoption + image_url: https://about.gitlab.com/images/14_0/codeownersadoption.png + published_at: 2021-06-22 + release: 14.0 +- title: Set pronouns on GitLab user profiles + body: | + Pronouns have been added to GitLab user profiles. The pronouns appear next to user names in the **Profile** tab. You can: + + - Decide whether or not to add pronouns to your profile. + - Self-identify and enter whatever pronouns you prefer, without selecting from a predefined list. + + Besides being more inclusive, GitLab wants help people use the correct pronouns when replying to comments to respect people's identity. + stage: Manage + self-managed: true + gitlab-com: true + packages: [Free, Premium, Ultimate] + url: https://docs.gitlab.com/ee/user/profile/#add-your-gender-pronouns + image_url: https://about.gitlab.com/images/14_0/pronouns.png + published_at: 2021-06-22 + release: 14.0 +- title: Container Scanning Integration with Trivy + body: | + Container scanning in GitLab now uses the Trivy engine by default. This change provides customers with more timely vulnerability intelligence updates, more accurate results, and support for a larger number of operating systems. 
Users who run container scanning with default settings are switched seamlessly and automatically to the new engine in GitLab 14.0. Users who customize the variables in their container scanning job should review our [migration guide](https://docs.gitlab.com/ee/user/application_security/container_scanning/#change-scanners) and make any necessary updates. + stage: Protect + self-managed: true + gitlab-com: true + packages: [Ultimate] + url: https://docs.gitlab.com/ee/user/application_security/container_scanning + image_url: https://about.gitlab.com/images/14_0/trivy_scanning_engine.png + published_at: 2021-06-22 + release: 14.0 +- title: Aggregate identical DAST vulnerabilities into a single vulnerability + body: | + In GitLab 13.12 and earlier, all DAST vulnerabilities found in a scan were listed individually for each URL the vulnerability was found on. This could create many vulnerabilities when the fix was a single file or configuration change. For example: an issue with a server header sent with every HTTP response would be reported on every page on the site, rather than reported as a single issue with multiple occurrences. + + To reduce the overhead of managing vulnerabilities, GitLab combines identical vulnerabilities found on multiple pages into a single reported vulnerability in the DAST report. The vulnerability details include a list of all the URLs where the vulnerability was found, rather than individual vulnerabilities being created in the vulnerability list and dashboard for each page. + + This new reporting functionality will not retroactively combine vulnerabilities found in previous scans. It only applies to scans performed in GitLab 14.0 and later. 
+ stage: Secure + self-managed: true + gitlab-com: true + packages: [Ultimate] + url: https://docs.gitlab.com/ee/user/application_security/dast/#reports + image_url: https://about.gitlab.com/images/14_0/dast_aggregated_urls.png + published_at: 2021-06-22 + release: 14.0 +- title: Epic Boards + body: | + Epic Boards align teams and organizations by communicating the status of epics continuously. Previous versions of GitLab required you to view and sort epics in a list to view the overall status. Keeping epics up to date meant making most changes through an epic's detail page. Epic Boards enable you to visualize and refine all of your epics in one place, using a customizable, drag-and-drop interface that is easy for any teammate to understand and collaborate. + + Epic Boards are also a game-changer for managing and visualizing ideal epic workflows, such as authoring workflow states (Draft, Writing, Done), DevOps workflow states (such as Planned, In Development, and In Production), or any other mutually exclusive states you might model with scoped labels. Visualizing workflows with an Epic Board empowers you to increase predictability and efficiency. + stage: Plan + self-managed: true + gitlab-com: true + packages: [Premium, Ultimate] + url: https://docs.gitlab.com/ee/user/group/epics/epic_boards.html + image_url: https://about.gitlab.com/images/14_0/epic-boards.png + published_at: 2021-06-22 + release: 14.0 +- title: Edit wiki pages with the WYSIWYG Markdown editor + body: | + Editing wiki content could be so much easier! Many GitLab wikis use Markdown formatting, and for some users, Markdown is a barrier to efficient collaboration. In this release, you now have access to a rich, modern Markdown editing experience in your wiki, so you can edit with confidence. + + Instant feedback and visual editing tools help make wiki editing more intuitive, and remove barriers to collaboration. 
GitLab saves the changes as Markdown when you're done, so users who want to edit the Markdown directly can do so. You can even type Markdown into the new editor and it will automatically format the text as you type. + + GitLab 14.0 introduces the [Content Editor](https://gitlab.com/groups/gitlab-org/-/epics/5401) into the Wiki with support for most of the basic Markdown content types like headers, bold and italic text, lists, code blocks, and links. [Full support](https://gitlab.com/groups/gitlab-org/-/epics/5438) for the entire [GitLab Flavored Markdown specification](https://docs.gitlab.com/ee/user/markdown.html) will arrive in upcoming releases. We also plan to make the Content Editor available in other areas of GitLab in the future. We welcome input on this early MVC in [this feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/332629). + stage: Create + self-managed: true + gitlab-com: true + packages: [Free, Premium, Ultimate] + url: https://docs.gitlab.com/ee/user/project/wiki/#content-editor + image_url: https://about.gitlab.com/images/14_0/wiki-content-editor-demo.gif + published_at: 2021-06-22 + release: 14.0 +- title: Change an issue's type + body: | + In some cases, you may wish to change an issue's type. For example, you may want to escalate an issue to an [incident](https://docs.gitlab.com/ee/operations/incident_management/index.html) to ensure that your team handles the problem properly. To change an issue's type, edit the issue and select an issue type from the **Issue type** selector menu. 
+ stage: Monitor + self-managed: true + gitlab-com: true + packages: [Free, Premium, Ultimate] + url: https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#change-the-issue-type + image_url: https://about.gitlab.com/images/14_0/change_type_on_issues.png + published_at: 2021-06-22 + release: 14.0 +- title: Prepopulate the CI/CD pipeline editor with an initial template + body: | + The pipeline editor in GitLab is your one-stop shop when interacting with CI/CD pipelines. Previously, when writing your first pipeline with the editor, you were presented with a blank configuration. While perfectly useful for experienced pipeline authors, it was a bit of a leap for those just starting out. + + In this release, if a project does not have a pipeline configured, the editor preloads a template showing an example 3-stage pipeline. You can save and run this pipeline right away to see it in action in your project. On top of that, it also has comments that help you understand the syntax, and tips and hints to help you start customizing the template to match your needs. It is now much easier to get your first green pipeline! + stage: Verify + self-managed: true + gitlab-com: true + packages: [Free, Premium, Ultimate] + url: https://docs.gitlab.com/ee/ci/pipeline_editor/ + image_url: https://about.gitlab.com/images/14_0/template.png + published_at: 2021-06-22 + release: 14.0 +- title: Terraform module registry built into GitLab + body: | + Terraform modules play a central role in building standard infrastructure components throughout an organization. Up to GitLab 13.12, GitLab users had to use either a third-party Terraform module registry, local modules, or Git-based modules. While these options work well, they do not help with the distribution of the modules and they lack proper versioning support, which introduces risks for module users. 
GitLab 14.0 extends our [Infrastructure-as-Code offerings](https://docs.gitlab.com/ee/user/infrastructure/) with a Terraform module registry. Now, you can use the Terraform module registry built into GitLab to discover Terraform modules with semantic versioning support for upgrades and maintenance. Moreover, you can publish modules easily using GitLab CI/CD. + + While following Terraform's best practices, we recommend developing each Terraform module in a dedicated GitLab project. To simplify the transition to the registry, users can host and publish multiple modules from a single GitLab repository. You can learn more about publishing and consuming a new module [in our documentation](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/index.html). + stage: Configure + self-managed: true + gitlab-com: true + packages: [Free, Premium, Ultimate] + url: https://docs.gitlab.com/ee/user/packages/terraform_module_registry/index.html + image_url: https://about.gitlab.com/images/14_0/terraform-module-registry.png + published_at: 2021-06-22 + release: 14.0 diff --git a/doc/administration/auditor_users.md b/doc/administration/auditor_users.md index 96bfbd88ddf..5f31ed709f2 100644 --- a/doc/administration/auditor_users.md +++ b/doc/administration/auditor_users.md @@ -53,17 +53,16 @@ helpful: you can create an Auditor user and then share the credentials with those users to which you want to grant access. -## Adding an Auditor user +## Add an Auditor user -To create a new Auditor user: +To create an Auditor user: -1. Create a new user or edit an existing one by navigating to - **Admin Area > Users**. The option of the access level is located in - the 'Access' section. - - ![Admin Area Form](img/auditor_access_form.png) - -1. Select **Save changes** or **Create user** for the changes to take effect. +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Overview > Users**. +1. 
Create a new user or edit an existing one, and in the **Access** section + select Auditor. +1. Select **Create user** or **Save changes** if you created a new user or + edited an existing one respectively. To revoke Auditor permissions from a user, make them a regular user by following the previous steps. diff --git a/doc/administration/auth/ldap/ldap-troubleshooting.md b/doc/administration/auth/ldap/ldap-troubleshooting.md index acafe52007b..1215d90134f 100644 --- a/doc/administration/auth/ldap/ldap-troubleshooting.md +++ b/doc/administration/auth/ldap/ldap-troubleshooting.md @@ -357,8 +357,8 @@ things to check to debug the situation. LDAP yet and must do so first. - You've waited an hour or [the configured interval](index.md#adjusting-ldap-group-sync-schedule) for the group to - sync. To speed up the process, either go to the GitLab group **Settings -> - Members** and press **Sync now** (sync one group) or [run the group sync Rake + sync. To speed up the process, either go to the GitLab group **Group information > Members** + and press **Sync now** (sync one group) or [run the group sync Rake task](../../raketasks/ldap.md#run-a-group-sync) (sync all groups). If all of the above looks good, jump in to a little more advanced debugging in diff --git a/doc/administration/geo/disaster_recovery/background_verification.md b/doc/administration/geo/disaster_recovery/background_verification.md index 8d3745130bd..f03cd64c14e 100644 --- a/doc/administration/geo/disaster_recovery/background_verification.md +++ b/doc/administration/geo/disaster_recovery/background_verification.md @@ -58,19 +58,25 @@ Feature.enable('geo_repository_verification') ## Repository verification -Go to the **Admin Area > Geo** dashboard on the **primary** node and expand -the **Verification information** tab for that node to view automatic checksumming -status for repositories and wikis. Successes are shown in green, pending work -in gray, and failures in red. 
+On the **primary** node: -![Verification status](img/verification-status-primary.png) +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. Expand **Verification information** tab for that node to view automatic checksumming + status for repositories and wikis. Successes are shown in green, pending work + in gray, and failures in red. -Go to the **Admin Area > Geo** dashboard on the **secondary** node and expand -the **Verification information** tab for that node to view automatic verification -status for repositories and wikis. As with checksumming, successes are shown in -green, pending work in gray, and failures in red. + ![Verification status](img/verification_status_primary_v14_0.png) -![Verification status](img/verification-status-secondary.png) +On the **secondary** node: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. Expand **Verification information** tab for that node to view automatic checksumming + status for repositories and wikis. Successes are shown in green, pending work + in gray, and failures in red. + + ![Verification status](img/verification_status_secondary_v14_0.png) ## Using checksums to compare Geo nodes @@ -92,11 +98,14 @@ data. The default and recommended re-verification interval is 7 days, though an interval as short as 1 day can be set. Shorter intervals reduce risk but increase load and vice versa. -Go to the **Admin Area > Geo** dashboard on the **primary** node, and -click the **Edit** button for the **primary** node to customize the minimum -re-verification interval: +On the **primary** node: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. 
Select **Edit** for the **primary** node to customize the minimum + re-verification interval: -![Re-verification interval](img/reverification-interval.png) + ![Re-verification interval](img/reverification-interval.png) The automatic background re-verification is enabled by default, but you can disable if you need. Run the following commands in a Rails console on the @@ -141,17 +150,19 @@ sudo gitlab-rake geo:verification:wiki:reset If the **primary** and **secondary** nodes have a checksum verification mismatch, the cause may not be apparent. To find the cause of a checksum mismatch: -1. Go to the **Admin Area > Overview > Projects** dashboard on the **primary** node, find the - project that you want to check the checksum differences and click on the - **Edit** button: - ![Projects dashboard](img/checksum-differences-admin-projects.png) +1. On the **primary** node: + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Overview > Projects**. + 1. Find the project that you want to check the checksum differences and + select its name. + 1. On the project administration page get the **Gitaly storage name**, + and **Gitaly relative path**. -1. On the project administration page get the **Gitaly storage name**, and **Gitaly relative path**: - ![Project administration page](img/checksum-differences-admin-project-page.png) + ![Project administration page](img/checksum-differences-admin-project-page.png) 1. Go to the project's repository directory on both **primary** and **secondary** nodes (the path is usually `/var/opt/gitlab/git-data/repositories`). Note that if `git_data_dirs` - is customized, check the directory layout on your server to be sure. 
+ is customized, check the directory layout on your server to be sure: ```shell cd /var/opt/gitlab/git-data/repositories diff --git a/doc/administration/geo/disaster_recovery/img/checksum-differences-admin-projects.png b/doc/administration/geo/disaster_recovery/img/checksum-differences-admin-projects.png Binary files differdeleted file mode 100644 index 85759d903a4..00000000000 --- a/doc/administration/geo/disaster_recovery/img/checksum-differences-admin-projects.png +++ /dev/null diff --git a/doc/administration/geo/disaster_recovery/img/replication-status.png b/doc/administration/geo/disaster_recovery/img/replication-status.png Binary files differdeleted file mode 100644 index d7085927c75..00000000000 --- a/doc/administration/geo/disaster_recovery/img/replication-status.png +++ /dev/null diff --git a/doc/administration/geo/disaster_recovery/img/verification-status-primary.png b/doc/administration/geo/disaster_recovery/img/verification-status-primary.png Binary files differdeleted file mode 100644 index 2503408ec5d..00000000000 --- a/doc/administration/geo/disaster_recovery/img/verification-status-primary.png +++ /dev/null diff --git a/doc/administration/geo/disaster_recovery/img/verification-status-secondary.png b/doc/administration/geo/disaster_recovery/img/verification-status-secondary.png Binary files differdeleted file mode 100644 index 462274d8b14..00000000000 --- a/doc/administration/geo/disaster_recovery/img/verification-status-secondary.png +++ /dev/null diff --git a/doc/administration/geo/disaster_recovery/img/verification_status_primary_v14_0.png b/doc/administration/geo/disaster_recovery/img/verification_status_primary_v14_0.png Binary files differnew file mode 100644 index 00000000000..9d2537a18bf --- /dev/null +++ b/doc/administration/geo/disaster_recovery/img/verification_status_primary_v14_0.png diff --git a/doc/administration/geo/disaster_recovery/img/verification_status_secondary_v14_0.png 
b/doc/administration/geo/disaster_recovery/img/verification_status_secondary_v14_0.png Binary files differnew file mode 100644 index 00000000000..3b4ff9f393b --- /dev/null +++ b/doc/administration/geo/disaster_recovery/img/verification_status_secondary_v14_0.png diff --git a/doc/administration/geo/disaster_recovery/planned_failover.md b/doc/administration/geo/disaster_recovery/planned_failover.md index d50078da172..5c15523ac78 100644 --- a/doc/administration/geo/disaster_recovery/planned_failover.md +++ b/doc/administration/geo/disaster_recovery/planned_failover.md @@ -109,13 +109,16 @@ The maintenance window won't end until Geo replication and verification is completely finished. To keep the window as short as possible, you should ensure these processes are close to 100% as possible during active use. -Go to the **Admin Area > Geo** dashboard on the **secondary** node to -review status. Replicated objects (shown in green) should be close to 100%, -and there should be no failures (shown in red). If a large proportion of -objects aren't yet replicated (shown in gray), consider giving the node more -time to complete +On the **secondary** node: -![Replication status](img/replication-status.png) +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. + Replicated objects (shown in green) should be close to 100%, + and there should be no failures (shown in red). If a large proportion of + objects aren't yet replicated (shown in gray), consider giving the node more + time to complete + + ![Replication status](../replication/img/geo_node_dashboard_v14_0.png) If any objects are failing to replicate, this should be investigated before scheduling the maintenance window. Following a planned failover, anything that @@ -134,23 +137,26 @@ This [content was moved to another location](background_verification.md). 
### Notify users of scheduled maintenance -On the **primary** node, navigate to **Admin Area > Messages**, add a broadcast -message. You can check under **Admin Area > Geo** to estimate how long it -takes to finish syncing. An example message would be: +On the **primary** node: -> A scheduled maintenance takes place at XX:XX UTC. We expect it to take -> less than 1 hour. +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Messages**. +1. Add a message notifying users on the maintenance window. + You can check under **Geo > Nodes** to estimate how long it + takes to finish syncing. +1. Select **Add broadcast message**. ## Prevent updates to the **primary** node To ensure that all data is replicated to a secondary site, updates (write requests) need to -be disabled on the primary site: - -1. Enable [maintenance mode](../../maintenance_mode/index.md). - -1. Disable non-Geo periodic background jobs on the **primary** node by navigating - to **Admin Area > Monitoring > Background Jobs > Cron**, pressing `Disable All`, - and then pressing `Enable` for the `geo_sidekiq_cron_config_worker` cron job. +be disabled on the **primary** site: + +1. Enable [maintenance mode](../../maintenance_mode/index.md) on the **primary** node. +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Monitoring > Background Jobs**. +1. On the Sidekiq dashboard, select **Cron**. +1. Select `Disable All` to disable non-Geo periodic background jobs. +1. Select `Enable` for the `geo_sidekiq_cron_config_worker` cron job. This job re-enables several other cron jobs that are essential for planned failover to complete successfully. @@ -158,23 +164,28 @@ be disabled on the primary site: 1. If you are manually replicating any data not managed by Geo, trigger the final replication process now. -1. 
On the **primary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues** - and wait for all queues except those with `geo` in the name to drop to 0. - These queues contain work that has been submitted by your users; failing over - before it is completed, causes the work to be lost. -1. On the **primary** node, navigate to **Admin Area > Geo** and wait for the - following conditions to be true of the **secondary** node you are failing over to: - - - All replication meters to each 100% replicated, 0% failures. - - All verification meters reach 100% verified, 0% failures. - - Database replication lag is 0ms. - - The Geo log cursor is up to date (0 events behind). - -1. On the **secondary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues** - and wait for all the `geo` queues to drop to 0 queued and 0 running jobs. -1. On the **secondary** node, use [these instructions](../../raketasks/check.md) - to verify the integrity of CI artifacts, LFS objects, and uploads in file - storage. +1. On the **primary** node: + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Monitoring > Background Jobs**. + 1. On the Sidekiq dashboard, select **Queues**, and wait for all queues except + those with `geo` in the name to drop to 0. + These queues contain work that has been submitted by your users; failing over + before it is completed, causes the work to be lost. + 1. On the left sidebar, select **Geo > Nodes** and wait for the + following conditions to be true of the **secondary** node you are failing over to: + + - All replication meters reach 100% replicated, 0% failures. + - All verification meters reach 100% verified, 0% failures. + - Database replication lag is 0ms. + - The Geo log cursor is up to date (0 events behind). + +1. On the **secondary** node: + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Monitoring > Background Jobs**. + 1. 
On the Sidekiq dashboard, select **Queues**, and wait for all the `geo` + queues to drop to 0 queued and 0 running jobs. + 1. [Run an integrity check](../../raketasks/check.md) to verify the integrity + of CI artifacts, LFS objects, and uploads in file storage. At this point, your **secondary** node contains an up-to-date copy of everything the **primary** node has, meaning nothing was lost when you fail over. diff --git a/doc/administration/geo/disaster_recovery/runbooks/planned_failover_multi_node.md b/doc/administration/geo/disaster_recovery/runbooks/planned_failover_multi_node.md index 3227fafca0f..4cfe781c7a4 100644 --- a/doc/administration/geo/disaster_recovery/runbooks/planned_failover_multi_node.md +++ b/doc/administration/geo/disaster_recovery/runbooks/planned_failover_multi_node.md @@ -63,13 +63,16 @@ Before following any of those steps, make sure you have `root` access to the **secondary** to promote it, since there isn't provided an automated way to promote a Geo replica and perform a failover. -On the **secondary** node, navigate to the **Admin Area > Geo** dashboard to -review its status. Replicated objects (shown in green) should be close to 100%, -and there should be no failures (shown in red). If a large proportion of -objects aren't yet replicated (shown in gray), consider giving the node more -time to complete. +On the **secondary** node: -![Replication status](../img/replication-status.png) +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes** to see its status. + Replicated objects (shown in green) should be close to 100%, + and there should be no failures (shown in red). If a large proportion of + objects aren't yet replicated (shown in gray), consider giving the node more + time to complete. + + ![Replication status](../../replication/img/geo_node_dashboard_v14_0.png) If any objects are failing to replicate, this should be investigated before scheduling the maintenance window. 
After a planned failover, anything that @@ -126,11 +129,14 @@ follow these steps to avoid unnecessary data loss: existing Git repository with an SSH remote URL. The server should refuse connection. - 1. On the **primary** node, disable non-Geo periodic background jobs by navigating - to **Admin Area > Monitoring > Background Jobs > Cron**, clicking `Disable All`, - and then clicking `Enable` for the `geo_sidekiq_cron_config_worker` cron job. - This job will re-enable several other cron jobs that are essential for planned - failover to complete successfully. + 1. On the **primary** node: + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Monitoring > Background Jobs**. + 1. On the Sidekiq dashboard, select **Cron**. + 1. Select `Disable All` to disable any non-Geo periodic background jobs. + 1. Select `Enable` for the `geo_sidekiq_cron_config_worker` cron job. + This job will re-enable several other cron jobs that are essential for planned + failover to complete successfully. 1. Finish replicating and verifying all data: @@ -141,22 +147,28 @@ follow these steps to avoid unnecessary data loss: 1. If you are manually replicating any [data not managed by Geo](../../replication/datatypes.md#limitations-on-replicationverification), trigger the final replication process now. - 1. On the **primary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues** - and wait for all queues except those with `geo` in the name to drop to 0. - These queues contain work that has been submitted by your users; failing over - before it is completed will cause the work to be lost. - 1. On the **primary** node, navigate to **Admin Area > Geo** and wait for the - following conditions to be true of the **secondary** node you are failing over to: - - All replication meters to each 100% replicated, 0% failures. - - All verification meters reach 100% verified, 0% failures. - - Database replication lag is 0ms. 
- - The Geo log cursor is up to date (0 events behind). - - 1. On the **secondary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues** - and wait for all the `geo` queues to drop to 0 queued and 0 running jobs. - 1. On the **secondary** node, use [these instructions](../../../raketasks/check.md) - to verify the integrity of CI artifacts, LFS objects, and uploads in file - storage. + 1. On the **primary** node: + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Monitoring > Background Jobs**. + 1. On the Sidekiq dashboard, select **Queues**, and wait for all queues except + those with `geo` in the name to drop to 0. + These queues contain work that has been submitted by your users; failing over + before it is completed, causes the work to be lost. + 1. On the left sidebar, select **Geo > Nodes** and wait for the + following conditions to be true of the **secondary** node you are failing over to: + + - All replication meters reach 100% replicated, 0% failures. + - All verification meters reach 100% verified, 0% failures. + - Database replication lag is 0ms. + - The Geo log cursor is up to date (0 events behind). + + 1. On the **secondary** node: + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Monitoring > Background Jobs**. + 1. On the Sidekiq dashboard, select **Queues**, and wait for all the `geo` + queues to drop to 0 queued and 0 running jobs. + 1. [Run an integrity check](../../../raketasks/check.md) to verify the integrity + of CI artifacts, LFS objects, and uploads in file storage. At this point, your **secondary** node will contain an up-to-date copy of everything the **primary** node has, meaning nothing will be lost when you fail over. 
diff --git a/doc/administration/geo/disaster_recovery/runbooks/planned_failover_single_node.md b/doc/administration/geo/disaster_recovery/runbooks/planned_failover_single_node.md index 7f311d172ef..6caeddad51a 100644 --- a/doc/administration/geo/disaster_recovery/runbooks/planned_failover_single_node.md +++ b/doc/administration/geo/disaster_recovery/runbooks/planned_failover_single_node.md @@ -57,7 +57,7 @@ and there should be no failures (shown in red). If a large proportion of objects aren't yet replicated (shown in gray), consider giving the node more time to complete. -![Replication status](../img/replication-status.png) +![Replication status](../../replication/img/geo_node_dashboard_v14_0.png) If any objects are failing to replicate, this should be investigated before scheduling the maintenance window. After a planned failover, anything that @@ -114,11 +114,14 @@ follow these steps to avoid unnecessary data loss: existing Git repository with an SSH remote URL. The server should refuse connection. - 1. On the **primary** node, disable non-Geo periodic background jobs by navigating - to **Admin Area > Monitoring > Background Jobs > Cron**, clicking `Disable All`, - and then clicking `Enable` for the `geo_sidekiq_cron_config_worker` cron job. - This job will re-enable several other cron jobs that are essential for planned - failover to complete successfully. + 1. On the **primary** node: + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Monitoring > Background Jobs**. + 1. On the Sidekiq dashboard, select **Cron**. + 1. Select `Disable All` to disable any non-Geo periodic background jobs. + 1. Select `Enable` for the `geo_sidekiq_cron_config_worker` cron job. + This job will re-enable several other cron jobs that are essential for planned + failover to complete successfully. 1. Finish replicating and verifying all data: @@ -129,22 +132,28 @@ follow these steps to avoid unnecessary data loss: 1. 
If you are manually replicating any [data not managed by Geo](../../replication/datatypes.md#limitations-on-replicationverification), trigger the final replication process now. - 1. On the **primary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues** - and wait for all queues except those with `geo` in the name to drop to 0. - These queues contain work that has been submitted by your users; failing over - before it is completed will cause the work to be lost. - 1. On the **primary** node, navigate to **Admin Area > Geo** and wait for the - following conditions to be true of the **secondary** node you are failing over to: - - All replication meters to each 100% replicated, 0% failures. - - All verification meters reach 100% verified, 0% failures. - - Database replication lag is 0ms. - - The Geo log cursor is up to date (0 events behind). - - 1. On the **secondary** node, navigate to **Admin Area > Monitoring > Background Jobs > Queues** - and wait for all the `geo` queues to drop to 0 queued and 0 running jobs. - 1. On the **secondary** node, use [these instructions](../../../raketasks/check.md) - to verify the integrity of CI artifacts, LFS objects, and uploads in file - storage. + 1. On the **primary** node: + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Monitoring > Background Jobs**. + 1. On the Sidekiq dashboard, select **Queues**, and wait for all queues except + those with `geo` in the name to drop to 0. + These queues contain work that has been submitted by your users; failing over + before it is completed, causes the work to be lost. + 1. On the left sidebar, select **Geo > Nodes** and wait for the + following conditions to be true of the **secondary** node you are failing over to: + + - All replication meters reach 100% replicated, 0% failures. + - All verification meters reach 100% verified, 0% failures. + - Database replication lag is 0ms. 
+ - The Geo log cursor is up to date (0 events behind). + + 1. On the **secondary** node: + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Monitoring > Background Jobs**. + 1. On the Sidekiq dashboard, select **Queues**, and wait for all the `geo` + queues to drop to 0 queued and 0 running jobs. + 1. [Run an integrity check](../../../raketasks/check.md) to verify the integrity + of CI artifacts, LFS objects, and uploads in file storage. At this point, your **secondary** node will contain an up-to-date copy of everything the **primary** node has, meaning nothing will be lost when you fail over. diff --git a/doc/administration/geo/replication/configuration.md b/doc/administration/geo/replication/configuration.md index 6d5f3e61ba0..926c4c565aa 100644 --- a/doc/administration/geo/replication/configuration.md +++ b/doc/administration/geo/replication/configuration.md @@ -196,9 +196,9 @@ keys must be manually replicated to the **secondary** node. gitlab-ctl reconfigure ``` -1. Visit the **primary** node's **Admin Area > Geo** - (`/admin/geo/nodes`) in your browser. -1. Click the **New node** button. +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. Select **New node**. ![Add secondary node](img/adding_a_secondary_node_v13_3.png) 1. Fill in **Name** with the `gitlab_rails['geo_node_name']` in `/etc/gitlab/gitlab.rb`. These values must always match *exactly*, character @@ -209,7 +209,7 @@ keys must be manually replicated to the **secondary** node. 1. Optionally, choose which groups or storage shards should be replicated by the **secondary** node. Leave blank to replicate all. Read more in [selective synchronization](#selective-synchronization). -1. Click the **Add node** button to add the **secondary** node. +1. Select **Add node** to add the **secondary** node. 1. 
SSH into your GitLab **secondary** server and restart the services: ```shell @@ -252,24 +252,28 @@ on the **secondary** node. Geo synchronizes repositories over HTTP/HTTPS, and therefore requires this clone method to be enabled. This is enabled by default, but if converting an existing node to Geo it should be checked: -1. Go to **Admin Area > Settings** (`/admin/application_settings/general`) on the **primary** node. -1. Expand "Visibility and access controls". +On the **primary** node: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Settings > General**. +1. Expand **Visibility and access controls**. 1. Ensure "Enabled Git access protocols" is set to either "Both SSH and HTTP(S)" or "Only HTTP(S)". ### Step 6. Verify proper functioning of the **secondary** node -Your **secondary** node is now configured! +You can sign in to the **secondary** node with the same credentials you used with +the **primary** node. After you sign in: -You can sign in to the _secondary_ node with the same credentials you used with -the _primary_ node. Visit the _secondary_ node's **Admin Area > Geo** -(`/admin/geo/nodes`) in your browser to determine if it's correctly identified -as a _secondary_ Geo node, and if Geo is enabled. +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. Verify that it's correctly identified as a **secondary** Geo node, and that + Geo is enabled. The initial replication, or 'backfill', is probably still in progress. You can monitor the synchronization process on each Geo node from the **primary** node's **Geo Nodes** dashboard in your browser. -![Geo dashboard](img/geo_node_dashboard.png) +![Geo dashboard](img/geo_node_dashboard_v14_0.png) If your installation isn't working properly, check the [troubleshooting document](troubleshooting.md). 
diff --git a/doc/administration/geo/replication/datatypes.md b/doc/administration/geo/replication/datatypes.md index a1461a64518..6989765dbad 100644 --- a/doc/administration/geo/replication/datatypes.md +++ b/doc/administration/geo/replication/datatypes.md @@ -189,6 +189,7 @@ successfully, you must replicate their data using some other means. |[Object pools for forked project deduplication](../../../development/git_object_deduplication.md) | **Yes** | No | No | | |[Container Registry](../../packages/container_registry.md) | **Yes** (12.3) | No | No | Disabled by default. See [instructions](docker_registry.md) to enable. | |[Content in object storage (beta)](object_storage.md) | **Yes** (12.4) | [No](https://gitlab.com/gitlab-org/gitlab/-/issues/13845) | No | | +|[Infrastructure Registry for Terraform Module](../../../user/packages/terraform_module_registry/index.md) | **Yes** (14.0) | [**Yes**](#limitation-of-verification-for-files-in-object-storage) (14.0) | Via Object Storage provider if supported. Native Geo support (Beta). | Behind feature flag `geo_package_file_replication`, enabled by default. | |[Project designs repository](../../../user/project/issues/design_management.md) | **Yes** (12.7) | [No](https://gitlab.com/gitlab-org/gitlab/-/issues/32467) | No | Designs also require replication of LFS objects and Uploads. | |[Package Registry for npm](../../../user/packages/npm_registry/index.md) | **Yes** (13.2) | [**Yes**](#limitation-of-verification-for-files-in-object-storage) (13.10) | Via Object Storage provider if supported. Native Geo support (Beta). | Behind feature flag `geo_package_file_replication`, enabled by default. | |[Package Registry for Maven](../../../user/packages/maven_repository/index.md) | **Yes** (13.2) | [**Yes**](#limitation-of-verification-for-files-in-object-storage) (13.10) | Via Object Storage provider if supported. Native Geo support (Beta). | Behind feature flag `geo_package_file_replication`, enabled by default. 
| diff --git a/doc/administration/geo/replication/disable_geo.md b/doc/administration/geo/replication/disable_geo.md index c71cf80d0c1..ba01c55a157 100644 --- a/doc/administration/geo/replication/disable_geo.md +++ b/doc/administration/geo/replication/disable_geo.md @@ -33,9 +33,12 @@ to do that. ## Remove the primary site from the UI -1. Go to **Admin Area > Geo** (`/admin/geo/nodes`). -1. Click the **Remove** button for the **primary** node. -1. Confirm by clicking **Remove** when the prompt appears. +To remove the **primary** site: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. Select **Remove** for the **primary** node. +1. Confirm by selecting **Remove** when the prompt appears. ## Remove secondary replication slots diff --git a/doc/administration/geo/replication/docker_registry.md b/doc/administration/geo/replication/docker_registry.md index a8628481ba7..cc0719442a1 100644 --- a/doc/administration/geo/replication/docker_registry.md +++ b/doc/administration/geo/replication/docker_registry.md @@ -124,7 +124,10 @@ For each application and Sidekiq node on the **secondary** site: ### Verify replication -To verify Container Registry replication is working, go to **Admin Area > Geo** -(`/admin/geo/nodes`) on the **secondary** site. -The initial replication, or "backfill", is probably still in progress. +To verify Container Registry replication is working, on the **secondary** site: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. + The initial replication, or "backfill", is probably still in progress. + You can monitor the synchronization process on each Geo site from the **primary** site's **Geo Nodes** dashboard in your browser. 
diff --git a/doc/administration/geo/replication/img/geo_architecture.png b/doc/administration/geo/replication/img/geo_architecture.png Binary files differindex aac63be41ff..90272537f43 100644 --- a/doc/administration/geo/replication/img/geo_architecture.png +++ b/doc/administration/geo/replication/img/geo_architecture.png diff --git a/doc/administration/geo/replication/img/geo_node_dashboard.png b/doc/administration/geo/replication/img/geo_node_dashboard.png Binary files differdeleted file mode 100644 index 8b9aceba825..00000000000 --- a/doc/administration/geo/replication/img/geo_node_dashboard.png +++ /dev/null diff --git a/doc/administration/geo/replication/img/geo_node_dashboard_v14_0.png b/doc/administration/geo/replication/img/geo_node_dashboard_v14_0.png Binary files differnew file mode 100644 index 00000000000..6d183fc6bd2 --- /dev/null +++ b/doc/administration/geo/replication/img/geo_node_dashboard_v14_0.png diff --git a/doc/administration/geo/replication/img/geo_node_health_v14_0.png b/doc/administration/geo/replication/img/geo_node_health_v14_0.png Binary files differnew file mode 100644 index 00000000000..4c640522569 --- /dev/null +++ b/doc/administration/geo/replication/img/geo_node_health_v14_0.png diff --git a/doc/administration/geo/replication/object_storage.md b/doc/administration/geo/replication/object_storage.md index 7dd831092a3..90a41ed3e1c 100644 --- a/doc/administration/geo/replication/object_storage.md +++ b/doc/administration/geo/replication/object_storage.md @@ -21,7 +21,7 @@ To have: [Read more about using object storage with GitLab](../../object_storage.md). -## Enabling GitLab managed object storage replication +## Enabling GitLab-managed object storage replication > [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/10586) in GitLab 12.4. 
@@ -31,10 +31,11 @@ This is a [**beta** feature](https://about.gitlab.com/handbook/product/#beta) an **Secondary** sites can replicate files stored on the **primary** site regardless of whether they are stored on the local file system or in object storage. -To enable GitLab replication, you must: +To enable GitLab replication: -1. Go to **Admin Area > Geo**. -1. Press **Edit** on the **secondary** site. +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. Select **Edit** on the **secondary** site. 1. In the **Synchronization Settings** section, find the **Allow this secondary node to replicate content on Object Storage** checkbox to enable it. diff --git a/doc/administration/geo/replication/remove_geo_site.md b/doc/administration/geo/replication/remove_geo_site.md index a42a4c4eb47..274eb28dbc9 100644 --- a/doc/administration/geo/replication/remove_geo_site.md +++ b/doc/administration/geo/replication/remove_geo_site.md @@ -9,7 +9,8 @@ type: howto **Secondary** sites can be removed from the Geo cluster using the Geo administration page of the **primary** site. To remove a **secondary** site: -1. Go to **Admin Area > Geo** (`/admin/geo/nodes`). +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. 1. Select the **Remove** button for the **secondary** site you want to remove. 1. Confirm by selecting **Remove** when the prompt appears. diff --git a/doc/administration/geo/replication/troubleshooting.md b/doc/administration/geo/replication/troubleshooting.md index 1fd923dbaf1..c00f523957c 100644 --- a/doc/administration/geo/replication/troubleshooting.md +++ b/doc/administration/geo/replication/troubleshooting.md @@ -25,8 +25,12 @@ Before attempting more advanced troubleshooting: ### Check the health of the **secondary** node -Visit the **primary** node's **Admin Area > Geo** (`/admin/geo/nodes`) in -your browser. 
We perform the following health checks on each **secondary** node +On the **primary** node: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. + +We perform the following health checks on each **secondary** node to help identify if something is wrong: - Is the node running? @@ -35,7 +39,7 @@ to help identify if something is wrong: - Is the node's secondary tracking database connected? - Is the node's secondary tracking database up-to-date? -![Geo health check](img/geo_node_dashboard.png) +![Geo health check](img/geo_node_health_v14_0.png) For information on how to resolve common errors reported from the UI, see [Fixing Common Errors](#fixing-common-errors). @@ -129,7 +133,8 @@ Geo finds the current machine's Geo node name in `/etc/gitlab/gitlab.rb` by: - Using the `gitlab_rails['geo_node_name']` setting. - If that is not defined, using the `external_url` setting. -This name is used to look up the node with the same **Name** in **Admin Area > Geo**. +This name is used to look up the node with the same **Name** in the **Geo Nodes** +dashboard. To check if the current machine has a node name that matches a node in the database, run the check task: @@ -739,8 +744,11 @@ If you are able to log in to the **primary** node, but you receive this error when attempting to log into a **secondary**, you should check that the Geo node's URL matches its external URL. -1. On the primary, visit **Admin Area > Geo**. -1. Find the affected **secondary** and click **Edit**. +On the **primary** node: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. Find the affected **secondary** site and select **Edit**. 1. Ensure the **URL** field matches the value found in `/etc/gitlab/gitlab.rb` in `external_url "https://gitlab.example.com"` on the frontend server(s) of the **secondary** node. 
diff --git a/doc/administration/geo/replication/tuning.md b/doc/administration/geo/replication/tuning.md index a4aad3dec68..9807f3e6444 100644 --- a/doc/administration/geo/replication/tuning.md +++ b/doc/administration/geo/replication/tuning.md @@ -7,20 +7,28 @@ type: howto # Tuning Geo **(PREMIUM SELF)** -## Changing the sync/verification capacity values +You can limit the number of concurrent operations the nodes can run +in the background. -In **Admin Area > Geo** (`/admin/geo/nodes`), -there are several variables that can be tuned to improve performance of Geo: +## Changing the sync/verification concurrency values -- Repository sync capacity -- File sync capacity -- Container repositories sync capacity -- Verification capacity +On the **primary** site: -Increasing capacity values will increase the number of jobs that are scheduled. +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. Select **Edit** of the secondary node you want to tune. +1. Under **Tuning settings**, there are several variables that can be tuned to + improve the performance of Geo: + + - Repository synchronization concurrency limit + - File synchronization concurrency limit + - Container repositories synchronization concurrency limit + - Verification concurrency limit + +Increasing the concurrency values will increase the number of jobs that are scheduled. However, this may not lead to more downloads in parallel unless the number of -available Sidekiq threads is also increased. For example, if repository sync -capacity is increased from 25 to 50, you may also want to increase the number +available Sidekiq threads is also increased. For example, if repository synchronization +concurrency is increased from 25 to 50, you may also want to increase the number of Sidekiq threads from 25 to 50. See the [Sidekiq concurrency documentation](../../operations/extra_sidekiq_processes.md#number-of-threads) for more details. 
diff --git a/doc/administration/housekeeping.md b/doc/administration/housekeeping.md index 9668b7277c2..a89e8a2bad5 100644 --- a/doc/administration/housekeeping.md +++ b/doc/administration/housekeeping.md @@ -9,25 +9,27 @@ info: To determine the technical writer assigned to the Stage/Group associated w GitLab supports and automates housekeeping tasks within your current repository, such as compressing file revisions and removing unreachable objects. -## Automatic housekeeping +## Configure housekeeping GitLab automatically runs `git gc` and `git repack` on repositories -after Git pushes. You can change how often this happens or turn it off in -**Admin Area > Settings > Repository** (`/admin/application_settings/repository`). +after Git pushes. -## Manual housekeeping +You can change how often this happens or turn it off: -The housekeeping function runs `repack` or `gc` depending on the -**Housekeeping** settings configured in **Admin Area > Settings > Repository**. +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Settings > Repository**. +1. Expand **Repository maintenance**. +1. Configure the Housekeeping options. +1. Select **Save changes**. -For example in the following scenario a `git repack -d` will be executed: +For example, in the following scenario a `git repack -d` will be executed: - Project: pushes since GC counter (`pushes_since_gc`) = `10` - Git GC period = `200` - Full repack period = `50` When the `pushes_since_gc` value is 50 a `repack -A -d --pack-kept-objects` runs, similarly when -the `pushes_since_gc` value is 200 a `git gc` runs. +the `pushes_since_gc` value is 200 a `git gc` runs: - `git gc` ([man page](https://mirrors.edge.kernel.org/pub/software/scm/git/docs/git-gc.html)) runs a number of housekeeping tasks, such as compressing file revisions (to reduce disk space and increase performance) @@ -38,12 +40,6 @@ the `pushes_since_gc` value is 200 a `git gc` runs. 
Housekeeping also [removes unreferenced LFS files](../raketasks/cleanup.md#remove-unreferenced-lfs-files) from your project on the same schedule as the `git gc` operation, freeing up storage space for your project. -To manually start the housekeeping process: - -1. In your project, go to **Settings > General**. -1. Expand the **Advanced** section. -1. Select **Run housekeeping**. - ## How housekeeping handles pool repositories Housekeeping for pool repositories is handled differently from standard repositories. diff --git a/doc/administration/img/auditor_access_form.png b/doc/administration/img/auditor_access_form.png Binary files differdeleted file mode 100644 index c179a7d3b0a..00000000000 --- a/doc/administration/img/auditor_access_form.png +++ /dev/null diff --git a/doc/administration/maintenance_mode/index.md b/doc/administration/maintenance_mode/index.md index c73a49287db..2f5d366f927 100644 --- a/doc/administration/maintenance_mode/index.md +++ b/doc/administration/maintenance_mode/index.md @@ -21,10 +21,11 @@ Maintenance Mode allows most external actions that do not change internal state. There are three ways to enable Maintenance Mode as an administrator: - **Web UI**: - 1. Go to **Admin Area > Settings > General**, expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**. + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Settings > General**. + 1. Expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**. You can optionally add a message for the banner as well. - - 1. Click **Save** for the changes to take effect. + 1. Select **Save changes**. - **API**: @@ -44,9 +45,11 @@ There are three ways to enable Maintenance Mode as an administrator: There are three ways to disable Maintenance Mode: - **Web UI**: - 1. Go to **Admin Area > Settings > General**, expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**. - - 1. Click **Save** for the changes to take effect. + 1. 
On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Settings > General**. + 1. Expand **Maintenance Mode**, and toggle **Enable Maintenance Mode**. + You can optionally add a message for the banner as well. + 1. Select **Save changes**. - **API**: @@ -166,7 +169,10 @@ Background jobs (cron jobs, Sidekiq) continue running as is, because background [During a planned Geo failover](../geo/disaster_recovery/planned_failover.md#prevent-updates-to-the-primary-node), it is recommended that you disable all cron jobs except for those related to Geo. -You can monitor queues and disable jobs in **Admin Area > Monitoring > Background Jobs**. +To monitor queues and disable jobs: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Monitoring > Background Jobs**. ### Incident management diff --git a/doc/administration/operations/extra_sidekiq_processes.md b/doc/administration/operations/extra_sidekiq_processes.md index ed89d11da75..b910a789d29 100644 --- a/doc/administration/operations/extra_sidekiq_processes.md +++ b/doc/administration/operations/extra_sidekiq_processes.md @@ -87,10 +87,10 @@ To start multiple processes: sudo gitlab-ctl reconfigure ``` -After the extra Sidekiq processes are added, navigate to -**Admin Area > Monitoring > Background Jobs** (`/admin/background_jobs`) in GitLab. +To view the Sidekiq processes in GitLab: -![Multiple Sidekiq processes](img/sidekiq-cluster.png) +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Monitoring > Background Jobs**. 
## Negate settings diff --git a/doc/administration/operations/fast_ssh_key_lookup.md b/doc/administration/operations/fast_ssh_key_lookup.md index 8acc40da4ab..bb0756cf948 100644 --- a/doc/administration/operations/fast_ssh_key_lookup.md +++ b/doc/administration/operations/fast_ssh_key_lookup.md @@ -104,11 +104,13 @@ In the case of lookup failures (which are common), the `authorized_keys` file is still scanned. So Git SSH performance would still be slow for many users as long as a large file exists. -You can disable any more writes to the `authorized_keys` file by unchecking -`Write to "authorized_keys" file` in the **Admin Area > Settings > Network > Performance optimization** of your GitLab -installation. +To disable any more writes to the `authorized_keys` file: -![Write to authorized keys setting](img/write_to_authorized_keys_setting.png) +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Settings > Network**. +1. Expand **Performance optimization**. +1. Clear the **Write to "authorized_keys" file** checkbox. +1. Select **Save changes**. Again, confirm that SSH is working by removing your user's SSH key in the UI, adding a new one, and attempting to pull a repository. 
diff --git a/doc/administration/operations/img/sidekiq-cluster.png b/doc/administration/operations/img/sidekiq-cluster.png Binary files differdeleted file mode 100644 index 3899385eb8f..00000000000 --- a/doc/administration/operations/img/sidekiq-cluster.png +++ /dev/null diff --git a/doc/administration/operations/img/write_to_authorized_keys_setting.png b/doc/administration/operations/img/write_to_authorized_keys_setting.png Binary files differdeleted file mode 100644 index f6227a6057b..00000000000 --- a/doc/administration/operations/img/write_to_authorized_keys_setting.png +++ /dev/null diff --git a/doc/administration/polling.md b/doc/administration/polling.md index f6732b8edc6..d3f558eeaaa 100644 --- a/doc/administration/polling.md +++ b/doc/administration/polling.md @@ -9,23 +9,24 @@ info: To determine the technical writer assigned to the Stage/Group associated w The GitLab UI polls for updates for different resources (issue notes, issue titles, pipeline statuses, etc.) on a schedule appropriate to the resource. -In **[Admin Area](../user/admin_area/index.md) > Settings > Preferences > Real-time features**, -you can configure "Polling -interval multiplier". This multiplier is applied to all resources at once, -and decimal values are supported. For the sake of the examples below, we will -say that issue notes poll every 2 seconds, and issue titles poll every 5 -seconds; these are _not_ the actual values. +To configure the polling interval multiplier: -- 1 is the default, and recommended for most installations. (Issue notes poll - every 2 seconds, and issue titles poll every 5 seconds.) -- 0 disables UI polling completely. (On the next poll, clients stop - polling for updates.) -- A value greater than 1 slows polling down. If you see issues with - database load from lots of clients polling for updates, increasing the - multiplier from 1 can be a good compromise, rather than disabling polling - completely. 
(For example: If this is set to 2, then issue notes poll every 4 - seconds, and issue titles poll every 10 seconds.) -- A value between 0 and 1 makes the UI poll more frequently (so updates - show in other sessions faster), but is **not recommended**. 1 should be - fast enough. (For example, if this is set to 0.5, then issue notes poll every - 1 second, and issue titles poll every 2.5 seconds.) +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Settings > Preferences**. +1. Expand **Real-time features**. +1. Set a value for the polling interval multiplier. This multiplier is applied + to all resources at once, and decimal values are supported: + + - `1.0` is the default, and recommended for most installations. + - `0` disables UI polling completely. On the next poll, clients stop + polling for updates. + - A value greater than `1` slows polling down. If you see issues with + database load from lots of clients polling for updates, increasing the + multiplier from 1 can be a good compromise, rather than disabling polling + completely. For example, if you set the value to `2`, all polling intervals + are multiplied by 2, which means that polling happens half as frequently. + - A value between `0` and `1` makes the UI poll more frequently (so updates + show in other sessions faster), but is **not recommended**. `1` should be + fast enough. + +1. Select **Save changes**. diff --git a/doc/administration/raketasks/check.md b/doc/administration/raketasks/check.md index 7f344a00f72..f7c91aa6b47 100644 --- a/doc/administration/raketasks/check.md +++ b/doc/administration/raketasks/check.md @@ -207,8 +207,7 @@ above. ### Dangling commits `gitlab:git:fsck` can find dangling commits. To fix them, try -[manually triggering housekeeping](../housekeeping.md#manual-housekeeping) -for the affected project(s). +[enabling housekeeping](../housekeeping.md). 
If the issue persists, try triggering `gc` via the [Rails Console](../operations/rails_console.md#starting-a-rails-console-session): diff --git a/doc/administration/raketasks/project_import_export.md b/doc/administration/raketasks/project_import_export.md index cd6ffc957b1..80321d75d66 100644 --- a/doc/administration/raketasks/project_import_export.md +++ b/doc/administration/raketasks/project_import_export.md @@ -50,8 +50,13 @@ Note the following: - Importing is only possible if the version of the import and export GitLab instances are compatible as described in the [Version history](../../user/project/settings/import_export.md#version-history). -- The project import option must be enabled in - application settings (`/admin/application_settings/general`) under **Import sources**, which is available - under **Admin Area > Settings > Visibility and access controls**. +- The project import option must be enabled: + + 1. On the top bar, select **Menu >** **{admin}** **Admin**. + 1. On the left sidebar, select **Settings > General**. + 1. Expand **Visibility and access controls**. + 1. Under **Import sources**, check the "Project export enabled" option. + 1. Select **Save changes**. + - The exports are stored in a temporary directory and are deleted every 24 hours by a specific worker. diff --git a/doc/administration/raketasks/storage.md b/doc/administration/raketasks/storage.md index 5b6d4e16d8d..cee63a6cae5 100644 --- a/doc/administration/raketasks/storage.md +++ b/doc/administration/raketasks/storage.md @@ -107,12 +107,15 @@ to project IDs 50 to 100 in an Omnibus GitLab installation: sudo gitlab-rake gitlab:storage:migrate_to_hashed ID_FROM=50 ID_TO=100 ``` -You can monitor the progress in the **Admin Area > Monitoring > Background Jobs** page. -There is a specific queue you can watch to see how long it will take to finish: -`hashed_storage:hashed_storage_project_migrate`. +To monitor the progress in GitLab: + +1. 
On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Monitoring > Background Jobs**. +1. Watch how long the `hashed_storage:hashed_storage_project_migrate` queue + will take to finish. After it reaches zero, you can confirm every project + has been migrated by running the commands above. -After it reaches zero, you can confirm every project has been migrated by running the commands above. -If you find it necessary, you can run this migration script again to schedule missing projects. +If you find it necessary, you can run the previous migration script again to schedule missing projects. Any error or warning is logged in Sidekiq's log file. @@ -120,7 +123,7 @@ If [Geo](../geo/index.md) is enabled, each project that is successfully migrated generates an event to replicate the changes on any **secondary** nodes. You only need the `gitlab:storage:migrate_to_hashed` Rake task to migrate your repositories, but there are -[additional commands(#list-projects-and-attachments) to help you inspect projects and attachments in both legacy and hashed storage. +[additional commands](#list-projects-and-attachments) to help you inspect projects and attachments in both legacy and hashed storage. ## Rollback from hashed storage to legacy storage diff --git a/doc/install/azure/index.md b/doc/install/azure/index.md index 0d62e4d1215..1351489642e 100644 --- a/doc/install/azure/index.md +++ b/doc/install/azure/index.md @@ -238,9 +238,11 @@ in this section whenever you need to update GitLab. ### Check the current version -To determine the version of GitLab you're currently running, -go to the **{admin}** **Admin Area**, and find the version -under the **Components** table. +To determine the version of GitLab you're currently running: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Overview > Dashboard**. +1. Find the version under the **Components** table. 
If there's a newer available version of GitLab that contains one or more security fixes, GitLab displays an **Update asap** notification message that diff --git a/doc/subscriptions/index.md b/doc/subscriptions/index.md index 62681d9a657..575ddd5462e 100644 --- a/doc/subscriptions/index.md +++ b/doc/subscriptions/index.md @@ -211,13 +211,13 @@ After you ensure that you are using OSI-approved licenses for your projects, you ###### Screenshot 1: License overview -On the left sidebar, select **Project Information > Details**. Take a screenshot that includes a view of the license you've chosen for your project. +On the left sidebar, select **Project information > Details**. Take a screenshot that includes a view of the license you've chosen for your project. ![License overview](img/license-overview.png) ###### Screenshot 2: License file -Navigate to one of the license files that you uploaded. You can usually find the license file by selecting **Project Information > Details** and scanning the page for the license. +Navigate to one of the license files that you uploaded. You can usually find the license file by selecting **Project information > Details** and scanning the page for the license. Make sure the screenshot includes the title of the license. ![License file](img/license-file.png) diff --git a/doc/topics/autodevops/upgrading_auto_deploy_dependencies.md b/doc/topics/autodevops/upgrading_auto_deploy_dependencies.md index 62dc061aba6..48d37e5125c 100644 --- a/doc/topics/autodevops/upgrading_auto_deploy_dependencies.md +++ b/doc/topics/autodevops/upgrading_auto_deploy_dependencies.md @@ -77,7 +77,7 @@ The v2 auto-deploy-image drops support for Kubernetes 1.15 and lower. If you nee Kubernetes cluster, follow your cloud provider's instructions. Here's [an example on GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/upgrading-a-cluster). -#### Helm 3 +#### Helm v3 > [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/228609) in GitLab 13.4. 
@@ -86,47 +86,38 @@ Previously, `auto-deploy-image` used Helm v2, which used Tiller in a cluster. In the v2 `auto-deploy-image`, it uses Helm v3 that doesn't require Tiller anymore. If your Auto DevOps project has an active environment that was deployed with the v1 -`auto-deploy-image`, use the following steps to upgrade to v2, which uses Helm 3: - -1. Modify your `.gitlab-ci.yml` with: - - ```yaml - include: - - template: Auto-DevOps.gitlab-ci.yml - - remote: https://gitlab.com/hfyngvason/ci-templates/-/raw/master/Helm-2to3.gitlab-ci.yml - - variables: - # If this variable is not present, the migration jobs will not show up - MIGRATE_HELM_2TO3: "true" - - .auto-deploy: - # Optional: If you are on GitLab 13.12 or older, pin the auto-deploy-image - # image: registry.gitlab.com/gitlab-org/cluster-integration/auto-deploy-image:v2.6.0 - variables: - AUTO_DEVOPS_FORCE_DEPLOY_V2: 1 - # If you have non-public pipelines, you can back up the entire namespace in a job artifact - # prior to the migration by setting the CI variable BACKUP_NAMESPACE to a non-empty value. - # WARNING: If you have public pipelines, this artifact will be public and can - # expose your secrets. - # BACKUP_HELM2_RELEASES: 1 - ``` - -1. Run the `<environment-name>:helm-2to3:migrate` job. -1. Deploy your environment as usual. This deployment uses Helm 3. -1. If the deployment succeeds, you can safely run `environment:helm-2to3:cleanup`. - This deletes all Helm 2 release data from the namespace. - - If you set `BACKUP_HELM2_RELEASES` to a non-empty value, the `<environment-name>:helm2to3:migrate` - job saves a backup for 1 week in a job artifact called `helm-2-release-backups`. - If you accidentally delete the Helm 2 releases before you are ready, then - this backup is in a Kubernetes manifest file that can be restored using - `kubectl apply -f $backup`. - - **WARNING:** - This artifact can contain secrets and is visible to any - user who can see your job. - -1. 
Remove the `MIGRATE_HELM_2TO3` CI/CD variable. +`auto-deploy-image`, use the following steps to upgrade to v2, which uses Helm v3: + +1. Include the [Helm 2to3 migration CI/CD template](https://gitlab.com/gitlab-org/gitlab/-/raw/master/lib/gitlab/ci/templates/Jobs/Helm-2to3.gitlab-ci.yml): + + - If you are on GitLab.com, or GitLab 14.0.1 or later, this template is already included in Auto DevOps. + - On other versions of GitLab, you can modify your `.gitlab-ci.yml` to include the templates: + + ```yaml + include: + - template: Auto-DevOps.gitlab-ci.yml + - remote: https://gitlab.com/gitlab-org/gitlab/-/raw/master/lib/gitlab/ci/templates/Jobs/Helm-2to3.gitlab-ci.yml + ``` + +1. Set the following CI/CD variables: + + - `MIGRATE_HELM_2TO3` to `true`. If this variable is not present, migration jobs do not run. + - `AUTO_DEVOPS_FORCE_DEPLOY_V2` to `1`. + - **Optional:** `BACKUP_HELM2_RELEASES` to `1`. If you set this variable, the migration + job saves a backup for 1 week in a job artifact called `helm-2-release-backups`. + If you accidentally delete the Helm v2 releases before you are ready, you can restore + this backup from a Kubernetes manifest file by using `kubectl apply -f $backup`. + + **WARNING:** + *Do not use this if you have public pipelines*. + This artifact can contain secrets and is visible to any + user who can see your job. + +1. Run a pipeline and trigger the `<environment-name>:helm-2to3:migrate` job. +1. Deploy your environment as usual. This deployment uses Helm v3. +1. If the deployment succeeds, you can safely run `<environment-name>:helm-2to3:cleanup`. + This deletes all Helm v2 release data from the namespace. +1. Remove the `MIGRATE_HELM_2TO3` CI/CD variable or set it to `false`. You can do this one environment at a time using [environment scopes](../../ci/environments/index.md#scoping-environments-with-specs). 
#### In-Cluster PostgreSQL Channel 2 diff --git a/doc/user/admin_area/geo_nodes.md b/doc/user/admin_area/geo_nodes.md index 32b1555c33d..19a76d0938b 100644 --- a/doc/user/admin_area/geo_nodes.md +++ b/doc/user/admin_area/geo_nodes.md @@ -10,7 +10,10 @@ type: howto You can configure various settings for GitLab Geo nodes. For more information, see [Geo documentation](../../administration/geo/index.md). -On the primary node, go to **Admin Area > Geo**. On secondary nodes, go to **Admin Area > Geo > Nodes**. +On either the primary or secondary node: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. ## Common settings @@ -61,8 +64,13 @@ The **primary** node's Internal URL is used by **secondary** nodes to contact it [External URL](https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-the-external-url-for-gitlab) which is used by users. Internal URL does not need to be a private address. -Internal URL defaults to External URL, but you can customize it under -**Admin Area > Geo > Nodes**. +Internal URL defaults to external URL, but you can also customize it: + +1. On the top bar, select **Menu >** **{admin}** **Admin**. +1. On the left sidebar, select **Geo > Nodes**. +1. Select **Edit** on the node you want to customize. +1. Edit the internal URL. +1. Select **Save changes**. WARNING: We recommend using an HTTPS connection while configuring the Geo nodes. To avoid diff --git a/doc/user/group/index.md b/doc/user/group/index.md index 104ea57db4a..15fbb442752 100644 --- a/doc/user/group/index.md +++ b/doc/user/group/index.md @@ -79,7 +79,7 @@ You can give a user access to all projects in a group. 1. On the top bar, select **Menu > Groups**. 1. Select **Your Groups**. 1. Find your group and select it. -1. From the left sidebar, select **Members**. +1. From the left sidebar, select **Group information > Members**. 1. Fill in the fields. - The role applies to all projects in the group. 
[Learn more about permissions](../permissions.md). - On the **Access expiration date**, the user can no longer access projects in the group. @@ -118,11 +118,11 @@ You can change the owner of a group. Each group must always have at least one member with the [Owner role](../permissions.md#group-members-permissions). - As an administrator: - 1. Go to the group and from the left menu, select **Members**. + 1. Go to the group and from the left menu, select **Group information > Members**. 1. Give a different member the **Owner** role. 1. Refresh the page. You can now remove the **Owner** role from the original owner. - As the current group's owner: - 1. Go to the group and from the left menu, select **Members**. + 1. Go to the group and from the left menu, select **Group information > Members**. 1. Give a different member the **Owner** role. 1. Have the new owner sign in and remove the **Owner** role from you. @@ -138,7 +138,7 @@ Prerequisites: To remove a member from a group: 1. Go to the group. -1. From the left menu, select **Members**. +1. From the left menu, select **Group information > Members**. 1. Next to the member you want to remove, select **Delete**. 1. Optional. On the **Remove member** confirmation box, select the **Also unassign this user from linked issues and merge requests** checkbox. @@ -156,7 +156,7 @@ To find members in a group, you can sort, filter, or search. Filter a group to find members. By default, all members in the group and subgroups are displayed. -1. Go to the group and select **Members**. +1. Go to the group and select **Group information > Members**. 1. Above the list of members, in the **Filter members** box, enter filter criteria. - To view members in the group only, select **Membership = Direct**. - To view members of the group and its subgroups, select **Membership = Inherited**. @@ -166,7 +166,7 @@ Filter a group to find members. 
By default, all members in the group and subgrou You can search for members by name, username, or email. -1. Go to the group and select **Members**. +1. Go to the group and select **Group information > Members**. 1. Above the list of members, in the **Filter members** box, enter search criteria. 1. To the right of the **Filter members** box, select the magnifying glass (**{search}**). @@ -174,7 +174,7 @@ You can search for members by name, username, or email. You can sort members by **Account**, **Access granted**, **Max role**, or **Last sign-in**. -1. Go to the group and select **Members**. +1. Go to the group and select **Group information > Members**. 1. Above the list of members, on the top right, from the **Account** list, select the criteria to filter by. 1. To switch the sort between ascending and descending, to the right of the **Account** list, select the @@ -273,7 +273,7 @@ To share a given group, for example, `Frontend` with another group, for example, `Engineering`: 1. Go to the `Frontend` group. -1. From the left menu, select **Members**. +1. From the left menu, select **Group information > Members**. 1. Select the **Invite group** tab. 1. In the **Select a group to invite** list, select `Engineering`. 1. For the **Max role**, select a [role](../permissions.md). @@ -297,7 +297,7 @@ In GitLab 13.11, you can optionally replace the sharing form with a modal window To share a group after enabling this feature: 1. Go to your group's page. -1. In the left sidebar, go to **Members**, and then select **Invite a group**. +1. In the left sidebar, go to **Group information > Members**, and then select **Invite a group**. 1. Select a group, and select a **Max role**. 1. (Optional) Select an **Access expiration date**. 1. Select **Invite**. @@ -341,7 +341,7 @@ To create group links via filter: LDAP user permissions can be manually overridden by an administrator. To override a user's permissions: -1. Go to your group's **Members** page. +1. 
Go to your group's **Group information > Members** page. 1. In the row for the user you are editing, select the pencil (**{pencil}**) icon. 1. Select the brown **Edit permissions** button in the modal. diff --git a/doc/user/project/members/index.md b/doc/user/project/members/index.md index ab33ff0f6d8..11d6bfb5d0c 100644 --- a/doc/user/project/members/index.md +++ b/doc/user/project/members/index.md @@ -21,7 +21,7 @@ Prerequisite: To add a user to a project: -1. Go to your project and select **Members**. +1. Go to your project and select **Project information > Members**. 1. On the **Invite member** tab, under **GitLab member or Email address**, type the username or email address. In GitLab 13.11 and later, you can [replace this form with a modal window](#add-a-member-modal-window). 1. Select a [role](../../permissions.md). @@ -52,7 +52,7 @@ Prerequisite: To add groups to a project: -1. Go to your project and select **Members**. +1. Go to your project and select **Project information > Members**. 1. On the **Invite group** tab, under **Select a group to invite**, choose a group. 1. Select the highest max [role](../../permissions.md) for users in the group. 1. Optional. Choose an expiration date. On that date, the user can no longer access the project. @@ -75,7 +75,7 @@ Prerequisite: To import users: -1. Go to your project and select **Members**. +1. Go to your project and select **Project information > Members**. 1. On the **Invite member** tab, at the bottom of the panel, select **Import**. 1. Select the project. You can view only the projects for which you're a maintainer. 1. Select **Import project members**. @@ -113,7 +113,7 @@ Prerequisite: To remove a member from a project: -1. Go to your project and select **Members**. +1. Go to your project and select **Project information > Members**. 1. Next to the project member you want to remove, select **Remove member** **{remove}**. 1. Optional. 
In the confirmation box, select the **Also unassign this user from related issues and merge requests** checkbox. 1. Select **Remove member**. @@ -128,7 +128,7 @@ You can filter and sort members in a project. ### Display inherited members -1. Go to your project and select **Members**. +1. Go to your project and select **Project information > Members**. 1. In the **Filter members** box, select `Membership` `=` `Inherited`. 1. Press Enter. @@ -136,7 +136,7 @@ You can filter and sort members in a project. ### Display direct members -1. Go to your project and select **Members**. +1. Go to your project and select **Project information > Members**. 1. In the **Filter members** box, select `Membership` `=` `Direct`. 1. Press Enter. @@ -205,7 +205,7 @@ This feature might not be available to you. Check the **version history** note a In GitLab 13.11, you can optionally replace the form to add a member with a modal window. To add a member after enabling this feature: -1. Go to your project and select **Members**. +1. Go to your project and select **Project information > Members**. 1. Select **Invite members**. 1. Enter an email address and select a role. 1. Optional. Select an **Access expiration date**. diff --git a/doc/user/project/members/share_project_with_groups.md b/doc/user/project/members/share_project_with_groups.md index caef5ef60b7..353ce73329e 100644 --- a/doc/user/project/members/share_project_with_groups.md +++ b/doc/user/project/members/share_project_with_groups.md @@ -27,7 +27,7 @@ This is where the group sharing feature can be of use. To share 'Project Acme' with the 'Engineering' group: -1. For 'Project Acme' use the left navigation menu to go to **Members**. +1. For 'Project Acme' use the left navigation menu to go to **Project information > Members**. 1. Select the **Invite group** tab. 1. Add the 'Engineering' group with the maximum access level of your choice. 1. Optionally, select an expiring date. 
@@ -59,7 +59,7 @@ In GitLab 13.11, you can optionally replace the sharing form with a modal window To share a project after enabling this feature: 1. Go to your project's page. -1. In the left sidebar, go to **Members**, and then select **Invite a group**. +1. In the left sidebar, go to **Project information > Members**, and then select **Invite a group**. 1. Select a group, and select a **Max role**. 1. (Optional) Select an **Access expiration date**. 1. Select **Invite**. diff --git a/doc/user/project/merge_requests/allow_collaboration.md b/doc/user/project/merge_requests/allow_collaboration.md index 63d5119c1b4..5d1a04e1fe0 100644 --- a/doc/user/project/merge_requests/allow_collaboration.md +++ b/doc/user/project/merge_requests/allow_collaboration.md @@ -87,7 +87,7 @@ To see the pipeline status from the merge request page of a forked project going back to the original project: 1. Create a group containing all the upstream members. -1. Go to the **Members** tab in the forked project and invite the newly-created +1. Go to the **Project information > Members** page in the forked project and invite the newly-created group to the forked project. <!-- ## Troubleshooting diff --git a/doc/user/project/settings/project_access_tokens.md b/doc/user/project/settings/project_access_tokens.md index be8a961d6c0..d7121239610 100644 --- a/doc/user/project/settings/project_access_tokens.md +++ b/doc/user/project/settings/project_access_tokens.md @@ -49,7 +49,7 @@ For the bot: API calls made with a project access token are associated with the corresponding bot user. -These bot users are included in a project's **Members** list but cannot be modified. Also, a bot +These bot users are included in a project's **Project information > Members** list but cannot be modified. Also, a bot user cannot be added to any other project. - The username is set to `project_{project_id}_bot` for the first access token, such as `project_123_bot`. 
diff --git a/geo_architecture.png b/geo_architecture.png Binary files differdeleted file mode 100644 index d1ad3d1d93d..00000000000 --- a/geo_architecture.png +++ /dev/null diff --git a/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml b/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml index 5680950bba8..207e2cf074a 100644 --- a/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml +++ b/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml @@ -166,6 +166,7 @@ include: - template: Jobs/Deploy/EC2.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Deploy/EC2.gitlab-ci.yml - template: Jobs/DAST-Default-Branch-Deploy.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/DAST-Default-Branch-Deploy.gitlab-ci.yml - template: Jobs/Browser-Performance-Testing.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Browser-Performance-Testing.gitlab-ci.yml + - template: Jobs/Helm-2to3.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Helm-2to3.gitlab-ci.yml - template: Security/DAST.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Security/DAST.gitlab-ci.yml - template: Security/Container-Scanning.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml - template: Security/Dependency-Scanning.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Security/Dependency-Scanning.gitlab-ci.yml diff --git a/lib/gitlab/ci/templates/Jobs/Helm-2to3.gitlab-ci.yml b/lib/gitlab/ci/templates/Jobs/Helm-2to3.gitlab-ci.yml new file mode 100644 index 00000000000..a130b09c51a --- /dev/null +++ b/lib/gitlab/ci/templates/Jobs/Helm-2to3.gitlab-ci.yml @@ -0,0 +1,151 @@ +# This is a pre-release of a Helm 2to3 migration template to facilitate +# the migration of Auto DevOps releases to Helm 3. 
+# +# To use, set the CI variable MIGRATE_HELM_2TO3 to "true". +# For more details, go to https://docs.gitlab.com/ee/topics/autodevops/upgrading_auto_deploy_dependencies.html#helm-v3 + +.helm-2to3-migrate: + image: registry.gitlab.com/gitlab-org/cluster-integration/helm-install-image/releases/helm-2to3-2.17.0-3.5.3-kube-1.16.15-alpine-3.12 + # NOTE: We use the deploy stage because: + # - It exists in all versions of Auto DevOps. + # - It is _empty_. + # - It precedes any Kubernetes deployments. + # Users on older versions of GitLab can include this template without adjusting their stages. + stage: deploy + needs: [] + artifacts: + when: always + expire_in: 1 week + paths: + - helm-2-release-backups/ + before_script: + - mkdir helm-2-release-backups + - apk add jq + - export TILLER_NAMESPACE=$KUBE_NAMESPACE + - export HELM_HOST="localhost:44134" + - nohup tiller -listen "${HELM_HOST}" >tiller.log 2>&1 & + - helm2 init --client-only + script: + # check for releases + - releases=$(helm2 ls --output json | jq -r '.Releases[].Name') + # back up entire namespace if the user opts into it + - | + if [[ -n "$releases" && -n "$BACKUP_HELM2_RELEASES" ]]; then + echo "Backing up releases" + kubectl get configmap -n "$KUBE_NAMESPACE" -l "OWNER=TILLER" -o yaml > "helm-2-release-backups/$KUBE_NAMESPACE.yaml" + fi + # adopt manifests from each release + - | + for release in $releases; do + chart=$(helm2 ls "^$release\$" --output json | jq -r '.Releases[0].Chart') + echo "Adopting Helm v2 manifests from $release" + # some resource kinds must be listed explicitly https://github.com/kubernetes/kubernetes/issues/42885 + for name in $(kubectl -n "$KUBE_NAMESPACE" get all,ingress,daemonset -o name -l chart="$chart"); do + kubectl annotate --overwrite "$name" meta.helm.sh/release-name="$release" + kubectl annotate --overwrite "$name" meta.helm.sh/release-namespace="$KUBE_NAMESPACE" + kubectl label --overwrite "$name" app.kubernetes.io/managed-by=Helm + done + done + # migrate each 
release + - | + for release in $releases; do + echo "Migrating release: $release" + helm3 2to3 convert --ignore-already-migrated --release-storage configmaps --tiller-out-cluster --tiller-ns "$TILLER_NAMESPACE" "$release" + done + +.helm-2to3-cleanup: + image: registry.gitlab.com/gitlab-org/cluster-integration/helm-install-image/releases/helm-2to3-2.17.0-3.5.3-kube-1.16.15-alpine-3.12 + stage: cleanup + environment: + action: prepare + before_script: + - export TILLER_NAMESPACE=$KUBE_NAMESPACE + - export HELM_HOST="localhost:44134" + - nohup tiller -listen "${HELM_HOST}" >tiller.log 2>&1 & + - helm2 init --client-only + script: + - helm3 2to3 cleanup --skip-confirmation --release-storage configmaps --tiller-out-cluster --tiller-ns "$TILLER_NAMESPACE" + +.review: + environment: + name: review/$CI_COMMIT_REF_NAME + rules: + - if: '$MIGRATE_HELM_2TO3 != "true"' + when: never + - if: '$CI_KUBERNETES_ACTIVE == null || $CI_KUBERNETES_ACTIVE == ""' + when: never + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + when: never + - if: '$REVIEW_DISABLED' + when: never + - if: '$CI_COMMIT_TAG || $CI_COMMIT_BRANCH' + when: manual + +review:helm-2to3:migrate: + extends: [.review, .helm-2to3-migrate] + +review:helm-2to3:cleanup: + extends: [.review, .helm-2to3-cleanup] + rules: + - if: '$MIGRATE_HELM_2TO3 != "true" && $CLEANUP_HELM_2TO3 == null' + when: never + - if: '$CI_KUBERNETES_ACTIVE == null || $CI_KUBERNETES_ACTIVE == ""' + when: never + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + when: never + - if: '$REVIEW_DISABLED' + when: never + - if: '$CI_COMMIT_TAG || $CI_COMMIT_BRANCH' + when: manual + +.staging: + environment: + name: staging + rules: + - if: '$MIGRATE_HELM_2TO3 != "true"' + when: never + - if: '$CI_KUBERNETES_ACTIVE == null || $CI_KUBERNETES_ACTIVE == ""' + when: never + - if: '$CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH' + when: never + - if: '$STAGING_ENABLED' + when: manual + +staging:helm-2to3:migrate: + extends: [.staging, .helm-2to3-migrate] + 
+staging:helm-2to3:cleanup: + extends: [.staging, .helm-2to3-cleanup] + rules: + - if: '$MIGRATE_HELM_2TO3 != "true" && $CLEANUP_HELM_2TO3 == null' + when: never + - if: '$CI_KUBERNETES_ACTIVE == null || $CI_KUBERNETES_ACTIVE == ""' + when: never + - if: '$CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH' + when: never + - if: '$STAGING_ENABLED' + when: manual + +.production: + environment: + name: production + rules: + - if: '$MIGRATE_HELM_2TO3 != "true"' + when: never + - if: '$CI_KUBERNETES_ACTIVE == null || $CI_KUBERNETES_ACTIVE == ""' + when: never + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + when: manual + +production:helm-2to3:migrate: + extends: [.production, .helm-2to3-migrate] + +production:helm-2to3:cleanup: + extends: [.production, .helm-2to3-cleanup] + rules: + - if: '$MIGRATE_HELM_2TO3 != "true" && $CLEANUP_HELM_2TO3 == null' + when: never + - if: '$CI_KUBERNETES_ACTIVE == null || $CI_KUBERNETES_ACTIVE == ""' + when: never + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + when: manual diff --git a/spec/lib/gitlab/ci/templates/auto_devops_gitlab_ci_yaml_spec.rb b/spec/lib/gitlab/ci/templates/auto_devops_gitlab_ci_yaml_spec.rb index 6dfcecb853a..b40b4f5645f 100644 --- a/spec/lib/gitlab/ci/templates/auto_devops_gitlab_ci_yaml_spec.rb +++ b/spec/lib/gitlab/ci/templates/auto_devops_gitlab_ci_yaml_spec.rb @@ -34,6 +34,7 @@ RSpec.describe 'Auto-DevOps.gitlab-ci.yml' do expect(build_names).not_to include('canary') expect(build_names).not_to include('review') expect(build_names).not_to include(a_string_matching(/rollout \d+%/)) + expect(build_names).not_to include(a_string_matching(/helm-2to3\d+%/)) end end @@ -190,6 +191,17 @@ RSpec.describe 'Auto-DevOps.gitlab-ci.yml' do expect(build_names).not_to include(a_string_matching(/rollout \d+%/)) end end + + context 'when MIGRATE_HELM_2TO3=true' do + before do + create(:ci_variable, project: project, key: 'MIGRATE_HELM_2TO3', value: 'true') + end + + it 'includes a helm-2to3:migrate and a helm-2to3:cleanup job' do + 
expect(build_names).to include('production:helm-2to3:migrate') + expect(build_names).to include('production:helm-2to3:cleanup') + end + end end context 'outside of default branch' do @@ -207,12 +219,23 @@ RSpec.describe 'Auto-DevOps.gitlab-ci.yml' do expect(build_names).to include('review') expect(build_names).not_to include(a_string_matching(/rollout \d+%/)) end + + context 'when MIGRATE_HELM_2TO3=true' do + before do + create(:ci_variable, project: project, key: 'MIGRATE_HELM_2TO3', value: 'true') + end + + it 'includes a helm-2to3:migrate and a helm-2to3:cleanup job' do + expect(build_names).to include('review:helm-2to3:migrate') + expect(build_names).to include('review:helm-2to3:cleanup') + end + end end end end end - describe 'build-pack detection' do + describe 'buildpack detection' do using RSpec::Parameterized::TableSyntax where(:case_name, :files, :variables, :include_build_names, :not_include_build_names) do |