author    | GitLab Bot <gitlab-bot@gitlab.com> | 2023-02-20 13:49:51 +0000
committer | GitLab Bot <gitlab-bot@gitlab.com> | 2023-02-20 13:49:51 +0000
commit    | 71786ddc8e28fbd3cb3fcc4b3ff15e5962a1c82e (patch)
tree      | 6a2d93ef3fb2d353bb7739e4b57e6541f51cdd71 /scripts
parent    | a7253423e3403b8c08f8a161e5937e1488f5f407 (diff)
download  | gitlab-ce-71786ddc8e28fbd3cb3fcc4b3ff15e5962a1c82e.tar.gz
Add latest changes from gitlab-org/gitlab@15-9-stable-ee (tag: v15.9.0-rc42)
Diffstat (limited to 'scripts')
-rw-r--r-- | scripts/api/default_options.rb | 3
-rw-r--r-- | scripts/api/find_issues.rb | 29
-rwxr-xr-x | scripts/build_qa_image | 53
-rwxr-xr-x | scripts/db_tasks | 21
-rwxr-xr-x | scripts/decomposition/generate-loose-foreign-key | 6
-rwxr-xr-x | scripts/failed_tests.rb | 89
-rwxr-xr-x | scripts/generate-e2e-pipeline | 13
-rwxr-xr-x | scripts/ingest-reports-to-siem-devo | 45
-rwxr-xr-x | scripts/lint-doc.sh | 26
-rwxr-xr-x | scripts/lint-docs-blueprints.rb | 86
-rwxr-xr-x | scripts/lint-json | 77
-rwxr-xr-x | scripts/lint-json.sh | 8
-rwxr-xr-x | scripts/partitioning/generate-fk | 3
-rwxr-xr-x | scripts/pipeline_test_report_builder.rb | 125
-rw-r--r-- | scripts/prepare_build.sh | 10
-rwxr-xr-x | scripts/process_custom_semgrep_results.sh | 55
-rwxr-xr-x | scripts/review_apps/automated_cleanup.rb | 55
-rw-r--r-- | scripts/review_apps/base-config.yaml | 2
-rwxr-xr-x | scripts/review_apps/review-apps.sh | 10
-rw-r--r-- | scripts/rspec_helpers.sh | 89
-rwxr-xr-x | scripts/trigger-build.rb | 33
-rw-r--r-- | scripts/utils.sh | 28
-rwxr-xr-x | scripts/verify-tff-mapping | 14
23 files changed, 672 insertions, 208 deletions
diff --git a/scripts/api/default_options.rb b/scripts/api/default_options.rb
index d10666e3a68..3085ef55085 100644
--- a/scripts/api/default_options.rb
+++ b/scripts/api/default_options.rb
@@ -13,6 +13,7 @@ end
 module Host
   DEFAULT_OPTIONS = {
     instance_base_url: ENV['CI_SERVER_URL'],
-    mr_id: ENV['CI_MERGE_REQUEST_ID']
+    target_project: ENV['CI_MERGE_REQUEST_PROJECT_ID'],
+    mr_iid: ENV['CI_MERGE_REQUEST_IID']
   }.freeze
 end
diff --git a/scripts/api/find_issues.rb b/scripts/api/find_issues.rb
new file mode 100644
index 00000000000..a1c37030319
--- /dev/null
+++ b/scripts/api/find_issues.rb
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+require 'gitlab'
+require_relative 'default_options'
+
+class FindIssues
+  def initialize(options)
+    @project = options.fetch(:project)
+
+    # Force the token to be a string so that if api_token is nil, it's set to '',
+    # allowing unauthenticated requests (for forks).
+    api_token = options.delete(:api_token).to_s
+
+    warn "No API token given." if api_token.empty?
+
+    @client = Gitlab.client(
+      endpoint: options.delete(:endpoint) || API::DEFAULT_OPTIONS[:endpoint],
+      private_token: api_token
+    )
+  end
+
+  def execute(search_data)
+    client.issues(project, search_data)
+  end
+
+  private
+
+  attr_reader :project, :client
+end
diff --git a/scripts/build_qa_image b/scripts/build_qa_image
index c0001181a51..9c401718336 100755
--- a/scripts/build_qa_image
+++ b/scripts/build_qa_image
@@ -1,11 +1,31 @@
 #!/bin/bash
 
-QA_IMAGE_NAME="gitlab-ee-qa"
-QA_BUILD_TARGET="ee"
+function is_latest_stable_tag() {
+  [ "$(latest_stable_tag)" == "${CI_COMMIT_TAG}" ]
+}
+
+function is_latest_tag() {
+  [ "$(latest_tag)" == "${CI_COMMIT_TAG}" ]
+}
+
+function latest_tag() {
+  git -c versionsort.prereleaseSuffix=rc tag --sort=-v:refname | head -1
+}
+
+function latest_stable_tag() {
+  git -c versionsort.prereleaseSuffix=rc tag --sort=-v:refname | awk '!/rc/' | head -1
+}
 
 if [[ "${CI_PROJECT_NAME}" == "gitlabhq" || "${CI_PROJECT_NAME}" == "gitlab-foss" || "${FOSS_ONLY}" == "1" ]]; then
   QA_IMAGE_NAME="gitlab-ce-qa"
   QA_BUILD_TARGET="foss"
+# Build QA Image for JH project
+elif [[ "${CI_PROJECT_PATH}" =~ ^gitlab-(jh|cn)\/.*$ || "${CI_PROJECT_NAME}" =~ ^gitlab-jh ]]; then
+  QA_IMAGE_NAME="gitlab-jh-qa"
+  QA_BUILD_TARGET="jhqa"
+else
+  QA_IMAGE_NAME="gitlab-ee-qa"
+  QA_BUILD_TARGET="ee"
 fi
 
 # Tag with commit SHA by default
@@ -28,8 +48,37 @@ if [ "${QA_IMAGE_NAME}" == "gitlab-ee-qa" ]; then
   DESTINATIONS="${DESTINATIONS} --tag $QA_IMAGE_FOR_AUTO_DEPLOY"
 fi
 
+# On tag pipelines in Canonical projects (gitlab and gitlab-foss), release to
+# Dockerhub also
+if [ -n "${CI_COMMIT_TAG}" ] && [ "${CI_PROJECT_NAMESPACE}" == "gitlab-org" ]; then
+  # Temporarily control release to Dockerhub, until we confirm it works in a
+  # release and finally drops the release job from omnibus-gitlab pipeline.
+  if [ "${RELEASE_QA_IMAGE_TO_DOCKERHUB}" == "true" ]; then
+    echo "$DOCKERHUB_PASSWORD" | docker login "docker.io" -u "$DOCKERHUB_USERNAME" --password-stdin
+
+    DOCKERHUB_TAG_IMAGE="gitlab/${QA_IMAGE_NAME}:${IMAGE_TAG}"
+    DESTINATIONS="${DESTINATIONS} --tag ${DOCKERHUB_TAG_IMAGE}"
+
+    # If we are on latest tag (RC or stable), tag the image as RC
+    if is_latest_tag; then
+      DESTINATIONS="${DESTINATIONS} --tag gitlab/${QA_IMAGE_NAME}:rc"
+    fi
+
+    # If we are on latest stable tag, tag the image as latest
+    if is_latest_stable_tag; then
+      DESTINATIONS="${DESTINATIONS} --tag gitlab/${QA_IMAGE_NAME}:latest"
+    fi
+  else
+    echo "RELEASE_QA_IMAGE_TO_DOCKERHUB not set to true. Not releasing to Dockerhub."
+  fi
+fi
+
 echo "Building QA image for '${QA_BUILD_TARGET}' for destinations: ${DESTINATIONS}"
 
+if [ "${QA_IMAGE_BUILD_DRY_RUN}" == "true" ]; then
+  exit 0
+fi
+
 docker buildx build \
   --cache-to=type=inline \
   --cache-from="$QA_IMAGE_BRANCH" \
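Note: the new `latest_tag` / `latest_stable_tag` helpers lean on git's version sort. A minimal sketch of the ordering they produce, assuming a repository whose newest tags are v15.8.1, v15.9.0-rc42 and v15.9.0 (hypothetical values):

    git -c versionsort.prereleaseSuffix=rc tag --sort=-v:refname
    # v15.9.0        <- latest_tag (head -1)
    # v15.9.0-rc42
    # v15.8.1

    git -c versionsort.prereleaseSuffix=rc tag --sort=-v:refname | awk '!/rc/'
    # v15.9.0        <- latest_stable_tag (rc tags filtered out)
    # v15.8.1

Because a stable tag satisfies both checks, it is pushed as both gitlab/<image>:rc and gitlab/<image>:latest, while an RC tag only refreshes :rc.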
diff --git a/scripts/db_tasks b/scripts/db_tasks
index 56c2eefef68..36040877abf 100755
--- a/scripts/db_tasks
+++ b/scripts/db_tasks
@@ -1,12 +1,17 @@
-#!/bin/bash
+#!/usr/bin/env ruby
+# frozen_string_literal: true
 
-root_path="$(cd "$(dirname "$0")/.." || exit ; pwd -P)"
-task=$1
+require 'yaml'
 
-shift
+rails_env = ENV.fetch('RAILS_ENV')
+database_config = YAML.load_file(File.join(File.expand_path('..', __dir__), 'config', 'database.yml'))[rails_env]
 
-if [[ -d "${root_path}/ee/" || "${DECOMPOSED_DB}" == "true" ]]; then
-  task="${task}:main"
-fi
+task = ARGV.shift
+raise ArgumentError, 'You need to pass a task name!' unless task
 
-eval "bundle exec rake ${task} ${*}"
+task = "#{task}:main" unless database_config.one?
+cmd = ['bundle', 'exec', 'rake', task, *ARGV]
+
+puts "Running: `#{cmd.join(' ')}`"
+
+system(*cmd)
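A usage sketch for the rewritten wrapper (task name and RAILS_ENV are illustrative):

    RAILS_ENV=test scripts/db_tasks db:migrate
    # => Running: `bundle exec rake db:migrate:main`  (more than one database configured)
    # => Running: `bundle exec rake db:migrate`       (single database configured)

The `:main` suffix is now derived from how many databases config/database.yml declares for the current RAILS_ENV, instead of from the presence of an ee/ directory or the DECOMPOSED_DB variable.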
diff --git a/scripts/decomposition/generate-loose-foreign-key b/scripts/decomposition/generate-loose-foreign-key
index 2e91aa662de..ad7d6e32aa0 100755
--- a/scripts/decomposition/generate-loose-foreign-key
+++ b/scripts/decomposition/generate-loose-foreign-key
@@ -164,7 +164,7 @@ def generate_migration(definition)
   content = <<-EOF.strip_heredoc
     # frozen_string_literal: true
 
-    class Remove#{definition.to_table.camelcase}#{definition.from_table.camelcase}#{definition.column.camelcase}Fk < Gitlab::Database::Migration[1.0]
+    class Remove#{definition.to_table.camelcase}#{definition.from_table.camelcase}#{definition.column.camelcase}Fk < Gitlab::Database::Migration[2.1]
       disable_ddl_transaction!
 
       def up
@@ -178,7 +178,9 @@ def generate_migration(definition)
       end
 
       def down
-        add_concurrent_foreign_key(:#{definition.from_table}, :#{definition.to_table}, name: "#{definition.name}", column: :#{definition.column}, target_column: :#{definition.primary_key}, on_delete: :#{definition.on_delete})
+        add_concurrent_foreign_key(:#{definition.from_table}, :#{definition.to_table},
+          name: "#{definition.name}", column: :#{definition.column},
+          target_column: :#{definition.primary_key}, on_delete: :#{definition.on_delete})
       end
     end
   EOF
diff --git a/scripts/failed_tests.rb b/scripts/failed_tests.rb
index 319961d277c..786d3c24c74 100755
--- a/scripts/failed_tests.rb
+++ b/scripts/failed_tests.rb
@@ -8,31 +8,47 @@ require 'json'
 require 'set'
 
 class FailedTests
+  DEFAULT_OPTIONS = {
+    previous_tests_report_path: 'test_results/previous/test_reports.json',
+    output_directory: 'tmp/previous_failed_tests/',
+    format: :oneline,
+    rspec_pg_regex: /rspec .+ pg12( .+)?/,
+    rspec_ee_pg_regex: /rspec-ee .+ pg12( .+)?/
+  }.freeze
+
   def initialize(options)
     @filename = options.delete(:previous_tests_report_path)
     @output_directory = options.delete(:output_directory)
+    @format = options.delete(:format).to_sym
     @rspec_pg_regex = options.delete(:rspec_pg_regex)
     @rspec_ee_pg_regex = options.delete(:rspec_ee_pg_regex)
   end
 
-  def output_failed_test_files
+  def output_failed_tests
     create_output_dir
 
-    failed_files_for_suite_collection.each do |suite_collection_name, suite_collection_files|
-      failed_test_files = suite_collection_files.map { |filepath| filepath.delete_prefix('./') }.join(' ')
+    failed_cases_for_suite_collection.each do |suite_name, suite_tests|
+      puts "[FailedTests] Detected #{suite_tests.size} failed tests in suite #{suite_name}..."
+      suite_tests =
+        case format
+        when :oneline
+          suite_tests.map { |test| test['file'] }.join(' ') # rubocop:disable Rails/Pluck
+        when :json
+          JSON.pretty_generate(suite_tests.to_a)
+        end
 
-      output_file = File.join(output_directory, "#{suite_collection_name}_failed_files.txt")
+      output_file = File.join(output_directory, "#{suite_name}_failed_tests.#{output_file_format}")
 
       File.open(output_file, 'w') do |file|
-        file.write(failed_test_files)
+        file.write(suite_tests)
       end
     end
   end
 
-  def failed_files_for_suite_collection
-    suite_map.each_with_object(Hash.new { |h, k| h[k] = Set.new }) do |(suite_collection_name, suite_collection_regex), hash|
+  def failed_cases_for_suite_collection
+    suite_map.each_with_object(Hash.new { |h, k| h[k] = Set.new }) do |(suite_name, suite_collection_regex), hash|
       failed_suites.each do |suite|
-        hash[suite_collection_name].merge(failed_files(suite)) if suite['name'] =~ suite_collection_regex
+        hash[suite_name].merge(failed_cases(suite)) if suite['name'] =~ suite_collection_regex
       end
     end
   end
@@ -47,7 +63,7 @@ class FailedTests
 
   private
 
-  attr_reader :filename, :output_directory, :rspec_pg_regex, :rspec_ee_pg_regex
+  attr_reader :filename, :output_directory, :format, :rspec_pg_regex, :rspec_ee_pg_regex
 
   def file_contents
     @file_contents ||= begin
@@ -65,50 +81,75 @@ class FailedTests
     end
   end
 
+  def output_file_format
+    case format
+    when :oneline
+      'txt'
+    when :json
+      'json'
+    else
+      raise "[FailedTests] Unsupported format `#{format}` (allowed formats: `oneline` and `json`)!"
+    end
+  end
+
   def failed_suites
     return [] unless file_contents_as_json['suites']
 
     file_contents_as_json['suites'].select { |suite| suite['failed_count'] > 0 }
   end
 
-  def failed_files(suite)
+  def failed_cases(suite)
     return [] unless suite
 
-    suite['test_cases'].each_with_object([]) do |failure_hash, failed_cases|
-      failed_cases << failure_hash['file'] if failure_hash['status'] == 'failed'
+    suite['test_cases'].filter_map do |failure_hash|
+      next if failure_hash['status'] != 'failed'
+
+      failure_hash['job_url'] = suite['job_url']
+      failure_hash['file'] = failure_hash['file'].delete_prefix('./')
+
+      failure_hash
     end
   end
 
   def create_output_dir
     return if File.directory?(output_directory)
 
-    puts 'Creating output directory...'
+    puts '[FailedTests] Creating output directory...'
     FileUtils.mkdir_p(output_directory)
   end
 end
 
 if $PROGRAM_NAME == __FILE__
-  options = {
-    previous_tests_report_path: 'test_results/previous/test_reports.json',
-    output_directory: 'tmp/previous_failed_tests/',
-    rspec_pg_regex: /rspec .+ pg12( .+)?/,
-    rspec_ee_pg_regex: /rspec-ee .+ pg12( .+)?/
-  }
+  options = FailedTests::DEFAULT_OPTIONS.dup
 
   OptionParser.new do |opts|
-    opts.on("-p", "--previous-tests-report-path PREVIOUS_TESTS_REPORT_PATH", String, "Path of the file listing previous test failures") do |value|
+    opts.on("-p", "--previous-tests-report-path PREVIOUS_TESTS_REPORT_PATH", String,
+      "Path of the file listing previous test failures (defaults to " \
+      "`#{FailedTests::DEFAULT_OPTIONS[:previous_tests_report_path]}`)") do |value|
       options[:previous_tests_report_path] = value
     end
 
-    opts.on("-o", "--output-directory OUTPUT_DIRECTORY", String, "Output directory for failed test files") do |value|
+    opts.on("-o", "--output-directory OUTPUT_DIRECTORY", String,
+      "Output directory for failed test files (defaults to " \
+      "`#{FailedTests::DEFAULT_OPTIONS[:output_directory]}`)") do |value|
       options[:output_directory] = value
     end
 
-    opts.on("--rspec-pg-regex RSPEC_PG_REGEX", Regexp, "Regex to use when finding matching RSpec jobs") do |value|
+    opts.on("-f", "--format [oneline|json]", String,
+      "Format of the output files: oneline (with test filenames) or JSON (defaults to " \
+      "`#{FailedTests::DEFAULT_OPTIONS[:format]}`)") do |value|
+      options[:format] = value
+    end
+
+    opts.on("--rspec-pg-regex RSPEC_PG_REGEX", Regexp,
+      "Regex to use when finding matching RSpec jobs (defaults to " \
+      "`#{FailedTests::DEFAULT_OPTIONS[:rspec_pg_regex]}`)") do |value|
      options[:rspec_pg_regex] = value
     end
 
-    opts.on("--rspec-ee-pg-regex RSPEC_EE_PG_REGEX", Regexp, "Regex to use when finding matching RSpec EE jobs") do |value|
+    opts.on("--rspec-ee-pg-regex RSPEC_EE_PG_REGEX", Regexp,
+      "Regex to use when finding matching RSpec EE jobs (defaults to " \
+      "`#{FailedTests::DEFAULT_OPTIONS[:rspec_ee_pg_regex]}`)") do |value|
      options[:rspec_ee_pg_regex] = value
     end
 
@@ -118,5 +159,5 @@ if $PROGRAM_NAME == __FILE__
     end
   end.parse!
 
-  FailedTests.new(options).output_failed_test_files
+  FailedTests.new(options).output_failed_tests
 end
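Example invocations of the updated script (the output directory is the DEFAULT_OPTIONS value; the exact suite names come from suite_map, e.g. `rspec`):

    scripts/failed_tests.rb --format oneline
    # writes e.g. tmp/previous_failed_tests/rspec_failed_tests.txt (space-separated list of spec files)

    scripts/failed_tests.rb --format json
    # writes e.g. tmp/previous_failed_tests/rspec_failed_tests.json (one hash per failed test, now including job_url)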
diff --git a/scripts/generate-e2e-pipeline b/scripts/generate-e2e-pipeline
index aef2447e800..c612a700f90 100755
--- a/scripts/generate-e2e-pipeline
+++ b/scripts/generate-e2e-pipeline
@@ -22,16 +22,19 @@ fi
 qa_cache_key="qa-e2e-ruby-${RUBY_VERSION}-$(md5sum qa/Gemfile.lock | awk '{ print $1 }')"
 
 variables=$(cat <<YML
 variables:
+  COLORIZED_LOGS: "true"
+  GIT_DEPTH: "20"
+  GIT_STRATEGY: "clone" # 'GIT_STRATEGY: clone' optimizes the pack-objects cache hit ratio
+  GIT_SUBMODULE_STRATEGY: "none"
   GITLAB_QA_CACHE_KEY: "$qa_cache_key"
   GITLAB_VERSION: "$(cat VERSION)"
-  COLORIZED_LOGS: "true"
   QA_EXPORT_TEST_METRICS: "${QA_EXPORT_TEST_METRICS:-true}"
-  QA_SAVE_TEST_METRICS: "${QA_SAVE_TEST_METRICS:-false}"
-  QA_RUN_ALL_TESTS: "${QA_RUN_ALL_TESTS:-false}"
-  QA_FRAMEWORK_CHANGES: "${QA_FRAMEWORK_CHANGES:-false}"
   QA_FEATURE_FLAGS: "${QA_FEATURE_FLAGS}"
-  QA_TESTS: "$QA_TESTS"
+  QA_FRAMEWORK_CHANGES: "${QA_FRAMEWORK_CHANGES:-false}"
+  QA_RUN_ALL_TESTS: "${QA_RUN_ALL_TESTS:-false}"
+  QA_SAVE_TEST_METRICS: "${QA_SAVE_TEST_METRICS:-false}"
   QA_SUITES: "$QA_SUITES"
+  QA_TESTS: "$QA_TESTS"
 YML
 )
diff --git a/scripts/ingest-reports-to-siem-devo b/scripts/ingest-reports-to-siem-devo
new file mode 100755
index 00000000000..0645d778217
--- /dev/null
+++ b/scripts/ingest-reports-to-siem-devo
@@ -0,0 +1,45 @@
+#!/usr/bin/env node
+
+const { S3Client, PutObjectCommand } = require('@aws-sdk/client-s3')
+const { fromIni } = require('@aws-sdk/credential-provider-ini')
+const path = require('path')
+const fs = require('fs')
+const crypto = require('crypto')
+
+function getMD5HashFromFile(data) {
+  const hash = crypto.createHash('md5').update(data).digest('base64')
+  return hash
+}
+
+(async function () {
+  const s3Client = new S3Client({
+    region: 'us-east-2',
+    credentials: fromIni({ profile: 'gl-logs-for-panther' }),
+  })
+  try {
+    const file = 'gl-dependency-scanning-report.json'
+    const data = fs.readFileSync(file)
+
+    const [filename, fileext] = path.basename(file).split('.')
+    const uniqueId = process.env['CI_PIPELINE_ID'] && process.env['CI_JOB_ID'] ?
+      process.env['CI_PIPELINE_ID'] + '-' + process.env['CI_JOB_ID'] :
+      Date.now()
+    const key = path.join('package_hunter_logs', filename + '-' + uniqueId + '.' + fileext)
+
+    const responseData = await s3Client.send(
+      new PutObjectCommand({
+        Bucket: 'package-hunter-logs',
+        Key: key,
+        Body: data,
+        ContentMD5: getMD5HashFromFile(data),
+      }),
+    )
+    console.log('Successfully uploaded %s to %s', file, key)
+  } catch (err) {
+    if (err.name === 'CredentialsProviderError' || err.name === 'AuthorizationHeaderMalformed')
+      console.log('Could not upload the report. Are AWS credentials configured in ~/.aws/credentials?')
+    else
+      console.log('Unexpected error during upload: ', err.message)
+    process.exit(1)
+  }
+})()
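A hedged usage sketch; the profile and bucket names come from the script itself, the CI variable values are illustrative:

    # expects gl-dependency-scanning-report.json in the working directory and a
    # `gl-logs-for-panther` profile in ~/.aws/credentials
    CI_PIPELINE_ID=123 CI_JOB_ID=456 scripts/ingest-reports-to-siem-devo
    # Successfully uploaded gl-dependency-scanning-report.json to package_hunter_logs/gl-dependency-scanning-report-123-456.json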
diff --git a/scripts/lint-doc.sh b/scripts/lint-doc.sh
index 68dfac95ef6..18e7d7d1c1c 100755
--- a/scripts/lint-doc.sh
+++ b/scripts/lint-doc.sh
@@ -119,19 +119,25 @@ else
   fi
 fi
 
-function run_locally_or_in_docker() {
+function run_locally_or_in_container() {
   local cmd=$1
   local args=$2
+  local registry_url="registry.gitlab.com/gitlab-org/gitlab-docs/lint-markdown:alpine-3.16-vale-2.22.0-markdownlint-0.32.2-markdownlint2-0.6.0"
 
   if hash ${cmd} 2>/dev/null
   then
     $cmd $args
-  elif hash docker 2>/dev/null
+  # When using software like Rancher Desktop, both nerdctl and docker binaries are available
+  # but only one is configured. To check which one to use, we need to probe each runtime
+  elif (hash nerdctl 2>/dev/null) && (nerdctl info 2>&1 1>/dev/null)
   then
-    docker run -t -v ${PWD}:/gitlab -w /gitlab --rm registry.gitlab.com/gitlab-org/gitlab-docs/lint-markdown:alpine-3.16-vale-2.20.1-markdownlint-0.32.2 ${cmd} ${args}
+    nerdctl run -t -v "${PWD}:/gitlab" -w /gitlab --rm ${registry_url} ${cmd} ${args}
+  elif (hash docker 2>/dev/null) && (docker info 2>&1 1>/dev/null)
+  then
+    docker run -t -v "${PWD}:/gitlab" -w /gitlab --rm ${registry_url} ${cmd} ${args}
   else
     echo
-    echo "  ✖ ERROR: '${cmd}' not found. Install '${cmd}' or Docker to proceed." >&2
+    echo "  ✖ ERROR: '${cmd}' not found. Install '${cmd}' or a container runtime (Docker/Nerdctl) to proceed." >&2
     echo
     ((ERRORCODE++))
   fi
@@ -151,11 +157,19 @@ if [ -z "${MD_DOC_PATH}" ]
 then
   echo "Merged results pipeline detected, but no markdown files found. Skipping."
 else
-  run_locally_or_in_docker 'markdownlint' "--config .markdownlint.yml ${MD_DOC_PATH} --rules doc/.markdownlint/rules"
+  yarn markdownlint --config .markdownlint.yml ${MD_DOC_PATH} --rules doc/.markdownlint/rules
+
+  if [ $? -ne 0 ]
+  then
+    echo
+    echo '✖ ERROR: Markdownlint failed with errors.' >&2
+    echo
+    ((ERRORCODE++))
+  fi
 fi
 
 echo '=> Linting prose...'
-run_locally_or_in_docker 'vale' "--minAlertLevel error --output=doc/.vale/vale.tmpl ${MD_DOC_PATH}"
+run_locally_or_in_container 'vale' "--minAlertLevel error --output=doc/.vale/vale.tmpl ${MD_DOC_PATH}"
 
 if [ $ERRORCODE -ne 0 ]
 then
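The probe works because `nerdctl info` and `docker info` fail when the binary exists but its runtime is not the one actually configured or running. A standalone sketch of the same detection logic:

    for runtime in nerdctl docker; do
      if hash "$runtime" 2>/dev/null && "$runtime" info >/dev/null 2>&1; then
        echo "$runtime is installed and configured"
        break
      fi
    done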
diff --git a/scripts/lint-docs-blueprints.rb b/scripts/lint-docs-blueprints.rb
new file mode 100755
index 00000000000..35e0013cb34
--- /dev/null
+++ b/scripts/lint-docs-blueprints.rb
@@ -0,0 +1,86 @@
+#!/usr/bin/env ruby
+
+# frozen_string_literal: true
+
+# Taken from Jekyll
+# https://github.com/jekyll/jekyll/blob/3.5-stable/lib/jekyll/document.rb#L13
+YAML_FRONT_MATTER_REGEXP = /\A(---\s*\n.*?\n?)^((---|\.\.\.)\s*$\n?)/m.freeze
+READ_LIMIT_BYTES = 1024
+
+require 'yaml'
+
+def extract_front_matter(path)
+  File.open(path, 'r') do |f|
+    data = if match = YAML_FRONT_MATTER_REGEXP.match(f.read(READ_LIMIT_BYTES))
+             YAML.safe_load(match[1])
+           else
+             {}
+           end
+
+    BlueprintFrontMatter.new(data)
+  end
+end
+
+class BlueprintFrontMatter
+  STATUSES = %w[proposed accepted ongoing implemented rejected]
+
+  attr_reader :errors
+
+  def initialize(metadata)
+    @metadata = metadata
+    @errors = []
+  end
+
+  def validate
+    validate_status
+    validate_authors
+    validate_creation_date
+  end
+
+  private
+
+  def validate_status
+    status = @metadata['status']
+
+    add_error('Missing status') unless status
+
+    return if STATUSES.include?(status)
+
+    add_error("Unsupported status '#{status}': expected one of '#{STATUSES.join(', ')}'")
+  end
+
+  def validate_authors
+    authors = @metadata['authors']
+
+    add_error('Missing authors') unless authors
+    add_error('Authors must be an array') unless authors.is_a?(Array)
+  end
+
+  def validate_creation_date
+    return if @metadata['creation-date'] =~ /\d{4}-[01]\d-[0123]\d/
+
+    add_error("Invalid creation-date: the date format must be 'yyyy-mm-dd'")
+  end
+
+  def add_error(msg)
+    @errors << msg
+  end
+end
+
+if $PROGRAM_NAME == __FILE__
+  exit_code = 0
+
+  Dir['doc/architecture/blueprints/*/index.md'].each do |blueprint|
+    meta = extract_front_matter(blueprint)
+    meta.validate
+
+    next if meta.errors.empty?
+
+    exit_code = 1
+
+    puts("✖ ERROR: Invalid #{blueprint}:")
+    meta.errors.each { |e| puts("  - #{e}") }
+  end
+
+  exit(exit_code)
+end
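Running the new linter locally; the failing blueprint below is hypothetical, but the output format matches the script:

    scripts/lint-docs-blueprints.rb
    # ✖ ERROR: Invalid doc/architecture/blueprints/example/index.md:
    #   - Unsupported status 'wip': expected one of 'proposed, accepted, ongoing, implemented, rejected'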
diff --git a/scripts/lint-json b/scripts/lint-json
new file mode 100755
index 00000000000..3fa952b13df
--- /dev/null
+++ b/scripts/lint-json
@@ -0,0 +1,77 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require "json"
+require "optparse"
+require "rainbow/refinement"
+
+using Rainbow
+
+options = {}
+
+OptionParser.new do |opts|
+  opts.banner = 'Checks if JSON files are pretty.'
+
+  opts.on('-f', '--format', 'Format JSON files inline.') do
+    options[:format] = true
+  end
+
+  opts.on('-s', '--stats', 'Print statistics after processing.') do
+    options[:stats] = true
+  end
+
+  opts.on('-v', '--verbose', 'Increase verbosity.') do
+    options[:verbose] = true
+  end
+
+  opts.on('-q', '--quiet', 'Do not print anything. Disables -s and -v') do
+    options[:quiet] = true
+  end
+
+  opts.on('-h', '--help', 'Prints this help') do
+    abort opts.to_s
+  end
+end.parse!
+
+def make_pretty(file, format:, verbose:, quiet:)
+  json = File.read(file)
+  pretty = JSON.pretty_generate(JSON.parse(json)) << "\n"
+
+  return :pretty if json == pretty
+
+  puts "#{file} is not pretty" if verbose && !quiet
+  return :todo unless format
+
+  puts "#{file} was not pretty. Fixed!" unless quiet
+  File.write(file, pretty)
+  :formatted
+rescue JSON::ParserError
+  puts "#{file} is invalid. Skipping!" unless quiet
+  :error
+end
+
+results = ARGV
+  .lazy
+  .flat_map { |pattern| Dir.glob(pattern) }
+  .map { |file| make_pretty(file, format: options[:format], verbose: options[:verbose], quiet: options[:quiet]) }
+  .to_a
+
+if options[:stats] && !options[:quiet]
+  puts format("Scanned total=%<total>d, pretty=%<pretty>d, formatted=%<formatted>d, error=%<error>d",
+    total: results.size,
+    pretty: results.count { |result| result == :pretty },
+    formatted: results.count { |result| result == :formatted },
+    error: results.count { |result| result == :error }
+  )
+end
+
+if results.any?(:todo)
+  unless options[:quiet]
+    puts "\nSome of the JSON files are not pretty-printed, you can run:".yellow
+    puts "\tscripts/lint-json -f $(git diff --name-only master... | grep \\\\.json)".white
+    puts "to fix them".yellow
+  end
+
+  exit(1)
+else
+  exit(0)
+end
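Typical invocations of the new Ruby linter (the glob patterns are illustrative):

    scripts/lint-json 'config/**/*.json'               # check only; exits 1 if any file is not pretty-printed
    scripts/lint-json --format --stats 'app/**/*.json'
    # Scanned total=12, pretty=10, formatted=2, error=0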
diff --git a/scripts/lint-json.sh b/scripts/lint-json.sh
deleted file mode 100755
index 685661c789a..00000000000
--- a/scripts/lint-json.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/env bash
-
-set -euo pipefail
-
-for file in "$@"
-do
-  yarn run -s jsonlint -p "$file" | perl -pe 'chomp if eof' | diff "$file" -
-done
diff --git a/scripts/partitioning/generate-fk b/scripts/partitioning/generate-fk
new file mode 100755
index 00000000000..4f2dac1d61e
--- /dev/null
+++ b/scripts/partitioning/generate-fk
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+exec bundle exec rails generate gitlab:partitioning:foreign_keys "$@"
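The wrapper forwards all of its arguments to the Rails generator, so anything the generator accepts can be passed through, e.g.:

    scripts/partitioning/generate-fk --help
    # equivalent to: bundle exec rails generate gitlab:partitioning:foreign_keys --help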
diff --git a/scripts/pipeline_test_report_builder.rb b/scripts/pipeline_test_report_builder.rb
index 90af0451864..6f69a5c692f 100755
--- a/scripts/pipeline_test_report_builder.rb
+++ b/scripts/pipeline_test_report_builder.rb
@@ -5,7 +5,6 @@ require 'optparse'
 require 'time'
 require 'fileutils'
 require 'uri'
-require 'cgi'
 require 'net/http'
 require 'json'
 require_relative 'api/default_options'
@@ -19,50 +18,79 @@ require_relative 'api/default_options'
 # https://gitlab.com/gitlab-org/gitlab/-/pipelines/363788864/tests/suite.json?build_ids[]=1555608749
 # Push into expected format for failed tests
 class PipelineTestReportBuilder
+  DEFAULT_OPTIONS = {
+    target_project: Host::DEFAULT_OPTIONS[:target_project],
+    mr_iid: Host::DEFAULT_OPTIONS[:mr_iid],
+    api_endpoint: API::DEFAULT_OPTIONS[:endpoint],
+    output_file_path: 'test_results/test_reports.json',
+    pipeline_index: :previous
+  }.freeze
+
   def initialize(options)
     @target_project = options.delete(:target_project)
-    @mr_id = options.delete(:mr_id) || Host::DEFAULT_OPTIONS[:mr_id]
-    @instance_base_url = options.delete(:instance_base_url) || Host::DEFAULT_OPTIONS[:instance_base_url]
-    @output_file_path = options.delete(:output_file_path)
-  end
-
-  def test_report_for_latest_pipeline
-    build_test_report_json_for_pipeline(previous_pipeline)
+    @mr_iid = options.delete(:mr_iid)
+    @api_endpoint = options.delete(:api_endpoint).to_s
+    @output_file_path = options.delete(:output_file_path).to_s
+    @pipeline_index = options.delete(:pipeline_index).to_sym
   end
 
   def execute
-    if output_file_path
-      FileUtils.mkdir_p(File.dirname(output_file_path))
-    end
+    FileUtils.mkdir_p(File.dirname(output_file_path))
 
     File.open(output_file_path, 'w') do |file|
-      file.write(test_report_for_latest_pipeline)
+      file.write(test_report_for_pipeline)
     end
   end
 
+  def test_report_for_pipeline
+    build_test_report_json_for_pipeline
+  end
+
+  def latest_pipeline
+    pipelines_sorted_descending[0]
+  end
+
   def previous_pipeline
-    # Top of the list will always be the current pipeline
+    # Top of the list will always be the latest pipeline
     # Second from top will be the previous pipeline
-    pipelines_for_mr.sort_by { |a| -Time.parse(a['created_at']).to_i }[1]
+    pipelines_sorted_descending[1]
   end
 
   private
 
-  attr_reader :target_project, :mr_id, :instance_base_url, :output_file_path
+  def pipeline
+    @pipeline ||=
+      case pipeline_index
+      when :latest
+        latest_pipeline
+      when :previous
+        previous_pipeline
+      else
+        raise "[PipelineTestReportBuilder] Unsupported pipeline_index `#{pipeline_index}` (allowed index: `latest` and `previous`!"
+      end
+  end
+
+  def pipelines_sorted_descending
+    # Top of the list will always be the current pipeline
+    # Second from top will be the previous pipeline
+    pipelines_for_mr.sort_by { |a| -a['id'] }
+  end
+
+  attr_reader :target_project, :mr_iid, :api_endpoint, :output_file_path, :pipeline_index
 
   def pipeline_project_api_base_url(pipeline)
-    "#{instance_base_url}/api/v4/projects/#{pipeline['project_id']}"
+    "#{api_endpoint}/projects/#{pipeline['project_id']}"
   end
 
   def target_project_api_base_url
-    "#{instance_base_url}/api/v4/projects/#{CGI.escape(target_project)}"
+    "#{api_endpoint}/projects/#{target_project}"
   end
 
   def pipelines_for_mr
-    fetch("#{target_project_api_base_url}/merge_requests/#{mr_id}/pipelines")
+    @pipelines_for_mr ||= fetch("#{target_project_api_base_url}/merge_requests/#{mr_iid}/pipelines")
   end
 
-  def failed_builds_for_pipeline(pipeline)
+  def failed_builds_for_pipeline
     fetch("#{pipeline_project_api_base_url(pipeline)}/pipelines/#{pipeline['id']}/jobs?scope=failed&per_page=100")
   end
 
@@ -70,44 +98,45 @@ class PipelineTestReportBuilder
   # Here we request individual builds, even though it is possible to supply multiple build IDs.
   # The reason for this; it is possible to lose the job context and name when requesting multiple builds.
   # Please see for more info: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69053#note_709939709
-  def test_report_for_build(pipeline, build_id)
-    fetch("#{pipeline['web_url']}/tests/suite.json?build_ids[]=#{build_id}")
+  def test_report_for_build(pipeline_url, build_id)
+    fetch("#{pipeline_url}/tests/suite.json?build_ids[]=#{build_id}").tap do |suite|
+      suite['job_url'] = job_url(pipeline_url, build_id)
+    end
   rescue Net::HTTPServerException => e
     raise e unless e.response.code.to_i == 404
 
-    puts "Artifacts not found. They may have expired. Skipping this build."
+    puts "[PipelineTestReportBuilder] Artifacts not found. They may have expired. Skipping this build."
   end
 
-  def build_test_report_json_for_pipeline(pipeline)
+  def build_test_report_json_for_pipeline
     # empty file if no previous failed pipeline
-    return {}.to_json if pipeline.nil? || pipeline['status'] != 'failed'
+    return {}.to_json if pipeline.nil?
 
-    test_report = {}
+    test_report = { 'suites' => [] }
 
-    puts "Discovered last failed pipeline (#{pipeline['id']}) for MR!#{mr_id}"
+    puts "[PipelineTestReportBuilder] Discovered #{pipeline_index} failed pipeline (##{pipeline['id']}) for MR!#{mr_iid}"
 
-    failed_builds_for_test_stage = failed_builds_for_pipeline(pipeline).select do |failed_build|
-      failed_build['stage'] == 'test'
-    end
+    failed_builds_for_pipeline.each do |failed_build|
+      next if failed_build['stage'] != 'test'
 
-    puts "#{failed_builds_for_test_stage.length} failed builds in test stage found..."
+      test_report['suites'] << test_report_for_build(pipeline['web_url'], failed_build['id'])
+    end
 
-    if failed_builds_for_test_stage.any?
-      test_report['suites'] ||= []
+    test_report['suites'].compact!
 
-      failed_builds_for_test_stage.each do |failed_build|
-        suite = test_report_for_build(pipeline, failed_build['id'])
-        test_report['suites'] << suite if suite
-      end
-    end
+    puts "[PipelineTestReportBuilder] #{test_report['suites'].size} failed builds in test stage found..."
 
     test_report.to_json
   end
 
+  def job_url(pipeline_url, build_id)
+    pipeline_url.sub(%r{/pipelines/.+}, "/jobs/#{build_id}")
+  end
+
   def fetch(uri_str)
     uri = URI(uri_str)
 
-    puts "URL: #{uri}"
+    puts "[PipelineTestReportBuilder] URL: #{uri}"
 
     request = Net::HTTP::Get.new(uri)
 
@@ -119,7 +148,7 @@ class PipelineTestReportBuilder
       when Net::HTTPSuccess
        body = response.read_body
      else
-        raise "Unexpected response: #{response.value}"
+        raise "[PipelineTestReportBuilder] Unexpected response: #{response.value}"
      end
    end
  end
@@ -129,25 +158,17 @@ class PipelineTestReportBuilder
 end
 
 if $PROGRAM_NAME == __FILE__
-  options = Host::DEFAULT_OPTIONS.dup
+  options = PipelineTestReportBuilder::DEFAULT_OPTIONS.dup
 
   OptionParser.new do |opts|
-    opts.on("-t", "--target-project TARGET_PROJECT", String, "Project where to find the merge request") do |value|
-      options[:target_project] = value
-    end
-
-    opts.on("-m", "--mr-id MR_ID", String, "A merge request ID") do |value|
-      options[:mr_id] = value
-    end
-
-    opts.on("-i", "--instance-base-url INSTANCE_BASE_URL", String, "URL of the instance where project and merge request resides") do |value|
-      options[:instance_base_url] = value
-    end
-
     opts.on("-o", "--output-file-path OUTPUT_PATH", String, "A path for output file") do |value|
       options[:output_file_path] = value
     end
 
+    opts.on("-p", "--pipeline-index [latest|previous]", String, "What pipeline to retrieve (defaults to `#{PipelineTestReportBuilder::DEFAULT_OPTIONS[:pipeline_index]}`)") do |value|
+      options[:pipeline_index] = value
+    end
+
     opts.on("-h", "--help", "Prints this help") do
       puts opts
       exit
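Example invocations with the trimmed option set (the defaults shown are the DEFAULT_OPTIONS values):

    scripts/pipeline_test_report_builder.rb --output-file-path test_results/test_reports.json
    # pipeline_index defaults to `previous`

    scripts/pipeline_test_report_builder.rb --pipeline-index latest
    # builds the report from the MR's latest pipeline instead

The target project and MR IID are no longer flags; they come from CI_MERGE_REQUEST_PROJECT_ID and CI_MERGE_REQUEST_IID via Host::DEFAULT_OPTIONS.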
diff --git a/scripts/prepare_build.sh b/scripts/prepare_build.sh
index d68432d9ec0..500e61ab76a 100644
--- a/scripts/prepare_build.sh
+++ b/scripts/prepare_build.sh
@@ -17,10 +17,12 @@ else
   cp config/database.yml.postgresql config/database.yml
 fi
 
-# Remove Geo database setting if `ee/` directory does not exist. When it does
-# not exist, it runs the GitLab test suite "as if FOSS", meaning the jobs run
-# in the context of gitlab-org/gitlab-foss where the Geo is not available.
-if [ ! -d "ee/" ] ; then
+# Set up Geo database if the job name matches `rspec-ee` or `geo`.
+# Since Geo is an EE feature, we shouldn't set it up for non-EE tests.
+if [[ "${CI_JOB_NAME}" =~ "rspec-ee" ]] || [[ "${CI_JOB_NAME}" =~ "geo" ]]; then
+  echoinfo "Geo DB will be set up."
+else
+  echoinfo "Geo DB won't be set up."
   sed -i '/geo:/,/^$/d' config/database.yml
 fi
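Note that bash's `=~` with a quoted right-hand side matches literally, so these are effectively substring checks. A sketch of which (illustrative) job names now get a Geo database:

    [[ "rspec-ee unit pg12 1/18" =~ "rspec-ee" ]] && echo "Geo DB set up"     # matches
    [[ "spec:geo migration" =~ "geo" ]] && echo "Geo DB set up"               # matches
    [[ "rspec unit pg12 1/24" =~ "rspec-ee" ]] || echo "geo: section stripped from database.yml"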
diff --git a/scripts/process_custom_semgrep_results.sh b/scripts/process_custom_semgrep_results.sh
new file mode 100755
index 00000000000..1fdd8e486f3
--- /dev/null
+++ b/scripts/process_custom_semgrep_results.sh
@@ -0,0 +1,55 @@
+# This script requires BOT_USER_ID, CUSTOM_SAST_RULES_BOT_PAT and CI_MERGE_REQUEST_IID variables to be set
+
+echo "Processing vuln report"
+
+# Preparing the message for the comment that will be posted by the bot
+# Empty string if there are no findings
+jq -crM '.vulnerabilities |
+  map( select( .identifiers[0].name | test( "glappsec_" ) ) |
+    "- `" + .location.file + "` line " + ( .location.start_line | tostring ) +
+    (
+      if .location.start_line = .location.end_line then ""
+      else ( " to " + ( .location.end_line | tostring ) ) end
+    ) +
+    ": " + .message
+  ) |
+  sort |
+  if length > 0 then
+    { body: ("The findings below have been detected based on the AppSec custom SAST rules. For more information about this bot and what to do with this comment head over to the [README](https://gitlab.com/gitlab-com/gl-security/appsec/sast-custom-rules/-/tree/main/appsec-pings). The following lines of code possibly need attention:\n\n" + join("\n") + "\n\n/cc @gitlab-com/gl-security/appsec") }
+  else
+    empty
+  end' gl-sast-report.json >findings.txt
+
+echo "Resulting file:"
+cat findings.txt
+
+EXISTING_COMMENT_ID=$(curl "https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/merge_requests/$CI_MERGE_REQUEST_IID/notes" \
+  --header "Private-Token: $CUSTOM_SAST_RULES_BOT_PAT" |
+  jq -crM 'map( select( .author.id == (env.BOT_USER_ID | tonumber) ) | .id ) | first')
+
+echo "EXISTING_COMMENT_ID: $EXISTING_COMMENT_ID"
+
+if [ "$EXISTING_COMMENT_ID" == "null" ]; then
+  if [ -s findings.txt ]; then
+    echo "No existing comment and there are findings: a new comment will be posted"
+    curl "https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/merge_requests/$CI_MERGE_REQUEST_IID/notes" \
+      --header "Private-Token: $CUSTOM_SAST_RULES_BOT_PAT" \
+      --header 'Content-Type: application/json' \
+      --data '@findings.txt'
+  else
+    echo "No existing comment and no findings: nothing to do"
+  fi
+else
+  if [ -s findings.txt ]; then
+    echo "There is an existing comment and there are findings: the existing comment will be updated"
+    curl --request PUT "https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/merge_requests/$CI_MERGE_REQUEST_IID/notes/$EXISTING_COMMENT_ID" \
+      --header "Private-Token: $CUSTOM_SAST_RULES_BOT_PAT" \
+      --header 'Content-Type: application/json' \
+      --data '@findings.txt'
+  else
+    echo "There is an existing comment but no findings: the existing comment will be updated to mention everything is resolved"
+    curl --request PUT "https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/merge_requests/$CI_MERGE_REQUEST_IID/notes/$EXISTING_COMMENT_ID" \
+      --header "Private-Token: $CUSTOM_SAST_RULES_BOT_PAT" \
+      --header 'Content-Type: application/json' \
+      --data '{"body":"All findings based on the [AppSec custom Semgrep rules](https://gitlab.com/gitlab-com/gl-security/appsec/sast-custom-rules/) have been resolved! :tada:"}'
+  fi
+fi
diff --git a/scripts/review_apps/automated_cleanup.rb b/scripts/review_apps/automated_cleanup.rb
index f459cb9c0ea..7e606b74de9 100755
--- a/scripts/review_apps/automated_cleanup.rb
+++ b/scripts/review_apps/automated_cleanup.rb
@@ -57,26 +57,28 @@ module ReviewApps
     end
 
     def helm
-      @helm ||= Tooling::Helm3Client.new(namespace: review_apps_namespace)
+      @helm ||= Tooling::Helm3Client.new
     end
 
     def kubernetes
       @kubernetes ||= Tooling::KubernetesClient.new(namespace: review_apps_namespace)
     end
 
-    def perform_gitlab_environment_cleanup!(days_for_stop:, days_for_delete:)
-      puts "Checking for Review Apps not updated in the last #{days_for_stop} days..."
+    def perform_gitlab_environment_cleanup!(days_for_delete:)
+      puts "Checking for Review Apps not updated in the last #{days_for_delete} days..."
 
       checked_environments = []
       delete_threshold = threshold_time(days: days_for_delete)
-      stop_threshold = threshold_time(days: days_for_stop)
       deployments_look_back_threshold = threshold_time(days: days_for_delete * 5)
 
       releases_to_delete = []
 
       # Delete environments via deployments
       gitlab.deployments(project_path, per_page: DEPLOYMENTS_PER_PAGE, sort: 'desc').auto_paginate do |deployment|
-        break if Time.parse(deployment.created_at) < deployments_look_back_threshold
+        last_deploy = deployment.created_at
+        deployed_at = Time.parse(last_deploy)
+
+        break if deployed_at < deployments_look_back_threshold
 
         environment = deployment.environment
 
@@ -84,27 +86,20 @@ module ReviewApps
         next unless environment.name.start_with?(ENVIRONMENT_PREFIX[:review_app])
         next if checked_environments.include?(environment.slug)
 
-        last_deploy = deployment.created_at
-        deployed_at = Time.parse(last_deploy)
-
         if deployed_at < delete_threshold
           deleted_environment = delete_environment(environment, deployment)
+
           if deleted_environment
-            release = Tooling::Helm3Client::Release.new(environment.slug, 1, deployed_at.to_s, nil, nil, environment.slug)
+            release = Tooling::Helm3Client::Release.new(name: environment.slug, namespace: environment.slug, revision: 1)
             releases_to_delete << release
           end
-        elsif deployed_at >= stop_threshold
-          print_release_state(subject: 'Review App', release_name: environment.slug, release_date: last_deploy, action: 'leaving')
-        else
-          environment_state = fetch_environment(environment)&.state
-          stop_environment(environment, deployment) if environment_state && environment_state != 'stopped'
         end
 
         checked_environments << environment.slug
       end
 
       delete_stopped_environments(environment_type: :review_app, checked_environments: checked_environments, last_updated_threshold: delete_threshold) do |environment|
-        releases_to_delete << Tooling::Helm3Client::Release.new(environment.slug, 1, environment.updated_at, nil, nil, environment.slug)
+        releases_to_delete << Tooling::Helm3Client::Release.new(name: environment.slug, namespace: environment.slug, revision: 1, updated: environment.updated_at)
       end
 
       delete_helm_releases(releases_to_delete)
@@ -116,18 +111,21 @@ module ReviewApps
       checked_environments = []
       stop_threshold = threshold_time(days: days_for_stop)
       delete_threshold = threshold_time(days: days_for_delete)
+      deployments_look_back_threshold = threshold_time(days: days_for_delete * 5)
 
       # Delete environments via deployments
       gitlab.deployments(project_path, per_page: DEPLOYMENTS_PER_PAGE, sort: 'desc').auto_paginate do |deployment|
+        last_deploy = deployment.created_at
+        deployed_at = Time.parse(last_deploy)
+
+        break if deployed_at < deployments_look_back_threshold
+
         environment = deployment.environment
 
         next unless environment
         next unless environment.name.start_with?(ENVIRONMENT_PREFIX[:docs_review_app])
         next if checked_environments.include?(environment.slug)
 
-        last_deploy = deployment.created_at
-        deployed_at = Time.parse(last_deploy)
-
         if deployed_at < stop_threshold
           environment_state = fetch_environment(environment)&.state
           stop_environment(environment, deployment) if environment_state && environment_state != 'stopped'
@@ -150,6 +148,7 @@ module ReviewApps
 
       helm_releases.each do |release|
         # Prevents deleting `dns-gitlab-review-app` releases or other unrelated releases
+        next unless Tooling::KubernetesClient::K8S_ALLOWED_NAMESPACES_REGEX.match?(release.namespace)
         next unless release.name.start_with?('review-')
 
         if release.status == 'failed' || release.last_update < threshold
@@ -228,7 +227,7 @@ module ReviewApps
     end
 
     def helm_releases
-      args = ['--all', '--date']
+      args = ['--all', '--all-namespaces', '--date']
 
       helm.releases(args: args)
     end
@@ -294,10 +293,6 @@ if $PROGRAM_NAME == __FILE__
 
   automated_cleanup = ReviewApps::AutomatedCleanup.new(options: options)
 
-  timed('Review Apps cleanup') do
-    automated_cleanup.perform_gitlab_environment_cleanup!(days_for_stop: 5, days_for_delete: 6)
-  end
-
   timed('Docs Review Apps cleanup') do
     automated_cleanup.perform_gitlab_docs_environment_cleanup!(days_for_stop: 20, days_for_delete: 30)
   end
@@ -305,13 +300,23 @@ if $PROGRAM_NAME == __FILE__
   puts
 
   timed('Helm releases cleanup') do
-    automated_cleanup.perform_helm_releases_cleanup!(days: 7)
+    automated_cleanup.perform_helm_releases_cleanup!(days: 2)
   end
 
+  puts
+
+  timed('Review Apps cleanup') do
+    automated_cleanup.perform_gitlab_environment_cleanup!(days_for_delete: 3)
+  end
+
+  puts
+
   timed('Stale Namespace cleanup') do
-    automated_cleanup.perform_stale_namespace_cleanup!(days: 14)
+    automated_cleanup.perform_stale_namespace_cleanup!(days: 3)
   end
 
+  puts
+
   timed('Stale PVC cleanup') do
     automated_cleanup.perform_stale_pvc_cleanup!(days: 30)
   end
diff --git a/scripts/review_apps/base-config.yaml b/scripts/review_apps/base-config.yaml
index 4d0f0ffec42..1d062a76191 100644
--- a/scripts/review_apps/base-config.yaml
+++ b/scripts/review_apps/base-config.yaml
@@ -91,7 +91,7 @@ gitlab:
         memory: 1927Mi
       limits:
         cpu: 450m
-        memory: 2890Mi
+        memory: 3500Mi
 
   webservice:
     resources:
diff --git a/scripts/review_apps/review-apps.sh b/scripts/review_apps/review-apps.sh
index 6a72b19c631..98ad3112202 100755
--- a/scripts/review_apps/review-apps.sh
+++ b/scripts/review_apps/review-apps.sh
@@ -130,7 +130,7 @@ function disable_sign_ups() {
   # Create the root token + Disable sign-ups
   local disable_signup_rb="token = User.find_by_username('root').personal_access_tokens.create(scopes: [:api], name: 'Token to disable sign-ups'); token.set_token('${REVIEW_APPS_ROOT_TOKEN}'); begin; token.save!; rescue(ActiveRecord::RecordNotUnique); end; Gitlab::CurrentSettings.current_application_settings.update!(signup_enabled: false)"
 
-  if (retry "run_task \"${disable_signup_rb}\""); then
+  if (retry_exponential "run_task \"${disable_signup_rb}\""); then
     echoinfo "Sign-ups have been disabled successfully."
   else
     echoerr "Sign-ups are still enabled!"
@@ -267,10 +267,10 @@ function deploy() {
     sentry_enabled="true"
   fi
 
-  ensure_namespace "${namespace}"
-  label_namespace "${namespace}" "tls=review-apps-tls" # label namespace for kubed to sync tls
+  retry "ensure_namespace \"${namespace}\""
+  retry "label_namespace \"${namespace}\" \"tls=review-apps-tls\"" # label namespace for kubed to sync tls
 
-  create_application_secret
+  retry "create_application_secret"
 
 cat > review_apps.values.yml <<EOF
   gitlab:
@@ -338,7 +338,7 @@ EOF
   echoinfo "Deploying with:"
   echo "${HELM_CMD}" | sed 's/ /\n\t/g'
 
-  run_timed_command "eval \"${HELM_CMD}\""
+  retry "eval \"${HELM_CMD}\""
 }
 
 function verify_deploy() {
diff --git a/scripts/rspec_helpers.sh b/scripts/rspec_helpers.sh
index 923b633fcc9..de735e03db0 100644
--- a/scripts/rspec_helpers.sh
+++ b/scripts/rspec_helpers.sh
@@ -75,30 +75,26 @@ function crystalball_rspec_data_exists() {
   compgen -G "crystalball/rspec*.yml" >/dev/null
 }
 
-function retrieve_previous_failed_tests() {
+function retrieve_failed_tests() {
   local directory_for_output_reports="${1}"
-  local rspec_pg_regex="${2}"
-  local rspec_ee_pg_regex="${3}"
-  local pipeline_report_path="test_results/previous/test_reports.json"
-
-  # Used to query merge requests. This variable reflects where the merge request has been created
-  local target_project_path="${CI_MERGE_REQUEST_PROJECT_PATH}"
-  local instance_url="${CI_SERVER_URL}"
+  local failed_tests_format="${2}"
+  local pipeline_index="${3}"
+  local pipeline_report_path="tmp/test_results/${pipeline_index}/test_reports.json"
 
   echo 'Attempting to build pipeline test report...'
 
-  scripts/pipeline_test_report_builder.rb --instance-base-url "${instance_url}" --target-project "${target_project_path}" --mr-id "${CI_MERGE_REQUEST_IID}" --output-file-path "${pipeline_report_path}"
+  scripts/pipeline_test_report_builder.rb --output-file-path "${pipeline_report_path}" --pipeline-index "${pipeline_index}"
 
   echo 'Generating failed tests lists...'
 
-  scripts/failed_tests.rb --previous-tests-report-path "${pipeline_report_path}" --output-directory "${directory_for_output_reports}" --rspec-pg-regex "${rspec_pg_regex}" --rspec-ee-pg-regex "${rspec_ee_pg_regex}"
+  scripts/failed_tests.rb --previous-tests-report-path "${pipeline_report_path}" --format "${failed_tests_format}" --output-directory "${directory_for_output_reports}"
 }
 
 function rspec_args() {
   local rspec_opts="${1}"
   local junit_report_file="${2:-${JUNIT_RESULT_FILE}}"
 
-  echo "-Ispec -rspec_helper --color --format documentation --format RspecJunitFormatter --out ${junit_report_file} ${rspec_opts}"
+  echo "-Ispec -rspec_helper --color --failure-exit-code 1 --error-exit-code 2 --format documentation --format RspecJunitFormatter --out ${junit_report_file} ${rspec_opts}"
 }
 
 function rspec_simple_job() {
@@ -110,10 +106,18 @@ function rspec_simple_job() {
   eval "${rspec_cmd}"
 }
 
+function rspec_simple_job_with_retry () {
+  local rspec_run_status=0
+
+  rspec_simple_job "${1}" "${2}" || rspec_run_status=$?
+
+  handle_retry_rspec_in_new_process $rspec_run_status
+}
+
 function rspec_db_library_code() {
   local db_files="spec/lib/gitlab/database/"
 
-  rspec_simple_job "-- ${db_files}"
+  rspec_simple_job_with_retry "-- ${db_files}"
 }
 
 function debug_rspec_variables() {
@@ -131,11 +135,30 @@ function debug_rspec_variables() {
   echoinfo "FLAKY_RSPEC_REPORT_PATH: ${FLAKY_RSPEC_REPORT_PATH}"
   echoinfo "NEW_FLAKY_RSPEC_REPORT_PATH: ${NEW_FLAKY_RSPEC_REPORT_PATH}"
   echoinfo "SKIPPED_FLAKY_TESTS_REPORT_PATH: ${SKIPPED_FLAKY_TESTS_REPORT_PATH}"
-  echoinfo "RETRIED_TESTS_REPORT_PATH: ${RETRIED_TESTS_REPORT_PATH}"
 
   echoinfo "CRYSTALBALL: ${CRYSTALBALL}"
 }
 
+function handle_retry_rspec_in_new_process() {
+  local rspec_run_status="${1}"
+
+  if [[ $rspec_run_status -eq 2 ]]; then
+    echoerr "Not retrying failing examples since there were errors happening outside of the RSpec examples!"
+  elif [[ $rspec_run_status -eq 1 ]]; then
+    # Experiment to retry failed examples in a new RSpec process: https://gitlab.com/gitlab-org/quality/team-tasks/-/issues/1148
+    if [[ "${RETRY_FAILED_TESTS_IN_NEW_PROCESS}" == "true" ]]; then
+      retry_failed_rspec_examples
+      rspec_run_status=$?
+    else
+      echoerr "Not retrying failing examples since \$RETRY_FAILED_TESTS_IN_NEW_PROCESS != 'true'!"
+    fi
+  else
+    echosuccess "No examples to retry, congrats!"
+  fi
+
+  exit $rspec_run_status
+}
+
 function rspec_paralellized_job() {
   read -ra job_name <<< "${CI_JOB_NAME}"
   local test_tool="${job_name[0]}"
@@ -179,7 +202,6 @@ function rspec_paralellized_job() {
   export FLAKY_RSPEC_REPORT_PATH="${rspec_flaky_folder_path}all_${report_name}_report.json"
   export NEW_FLAKY_RSPEC_REPORT_PATH="${rspec_flaky_folder_path}new_${report_name}_report.json"
   export SKIPPED_FLAKY_TESTS_REPORT_PATH="${rspec_flaky_folder_path}skipped_flaky_tests_${report_name}_report.txt"
-  export RETRIED_TESTS_REPORT_PATH="${rspec_flaky_folder_path}retried_tests_${report_name}_report.txt"
 
   if [[ -d "ee/" ]]; then
     export KNAPSACK_GENERATE_REPORT="true"
@@ -204,17 +226,7 @@ function rspec_paralellized_job() {
 
   echoinfo "RSpec exited with ${rspec_run_status}."
 
-  # Experiment to retry failed examples in a new RSpec process: https://gitlab.com/gitlab-org/quality/team-tasks/-/issues/1148
-  if [[ $rspec_run_status -ne 0 ]]; then
-    if [[ "${RETRY_FAILED_TESTS_IN_NEW_PROCESS}" == "true" ]]; then
-      retry_failed_rspec_examples
-      rspec_run_status=$?
-    fi
-  else
-    echosuccess "No examples to retry, congrats!"
-  fi
-
-  exit $rspec_run_status
+  handle_retry_rspec_in_new_process $rspec_run_status
 }
 
 function retry_failed_rspec_examples() {
@@ -228,6 +240,12 @@ function retry_failed_rspec_examples() {
   # Keep track of the tests that are retried, later consolidated in a single file by the `rspec:flaky-tests-report` job
   local failed_examples=$(grep " failed" ${RSPEC_LAST_RUN_RESULTS_FILE})
+  local report_name=$(echo "${CI_JOB_NAME}" | sed -E 's|[/ ]|_|g') # e.g. 'rspec unit pg12 1/24' would become 'rspec_unit_pg12_1_24'
+  local rspec_flaky_folder_path="$(dirname "${FLAKY_RSPEC_SUITE_REPORT_PATH}")/"
+
+  export RETRIED_TESTS_REPORT_PATH="${rspec_flaky_folder_path}retried_tests_${report_name}_report.txt"
+  echoinfo "RETRIED_TESTS_REPORT_PATH: ${RETRIED_TESTS_REPORT_PATH}"
+
   echo "${CI_JOB_URL}" > "${RETRIED_TESTS_REPORT_PATH}"
   echo $failed_examples >> "${RETRIED_TESTS_REPORT_PATH}"
 
@@ -241,8 +259,11 @@ function retry_failed_rspec_examples() {
   # Disable simplecov so retried tests don't override test coverage report
   export SIMPLECOV=0
 
+  local default_knapsack_pattern="{,ee/,jh/}spec/{,**/}*_spec.rb"
+  local knapsack_test_file_pattern="${KNAPSACK_TEST_FILE_PATTERN:-$default_knapsack_pattern}"
+
   # Retry only the tests that failed on first try
-  rspec_simple_job "--only-failures --pattern \"${KNAPSACK_TEST_FILE_PATTERN}\"" "${JUNIT_RETRY_FILE}"
+  rspec_simple_job "--only-failures --pattern \"${knapsack_test_file_pattern}\"" "${JUNIT_RETRY_FILE}"
   rspec_run_status=$?
 
   # Merge the JUnit report from retry into the first-try report
@@ -295,7 +316,7 @@ function rspec_rerun_previous_failed_tests() {
   fi
 
   if [[ -n $test_files ]]; then
-    rspec_simple_job "${test_files}"
+    rspec_simple_job_with_retry "${test_files}"
   else
     echo "No failed test files to rerun"
   fi
@@ -316,7 +337,7 @@ function rspec_fail_fast() {
   fi
 
   if [[ -n $test_files ]]; then
-    rspec_simple_job "${rspec_opts} ${test_files}"
+    rspec_simple_job_with_retry "${rspec_opts} ${test_files}"
   else
     echo "No rspec fail-fast tests to run"
   fi
@@ -326,10 +347,18 @@ function filter_rspec_matched_foss_tests() {
   local matching_tests_file="${1}"
   local foss_matching_tests_file="${2}"
 
-  # Keep only files that exists (i.e. exclude EE speficic files)
+  # Keep only FOSS files that exists
   cat ${matching_tests_file} | ruby -e 'puts $stdin.read.split(" ").select { |f| f.start_with?("spec/") && File.exist?(f) }.join(" ")' > "${foss_matching_tests_file}"
 }
 
+function filter_rspec_matched_ee_tests() {
+  local matching_tests_file="${1}"
+  local ee_matching_tests_file="${2}"
+
+  # Keep only EE files that exists
+  cat ${matching_tests_file} | ruby -e 'puts $stdin.read.split(" ").select { |f| f.start_with?("ee/spec/") && File.exist?(f) }.join(" ")' > "${ee_matching_tests_file}"
+}
+
 function generate_frontend_fixtures_mapping() {
   local pattern=""
 
@@ -351,7 +380,7 @@ function generate_frontend_fixtures_mapping() {
 
   mkdir -p $(dirname "$FRONTEND_FIXTURES_MAPPING_PATH")
 
-  rspec_simple_job "--pattern \"${pattern}\""
+  rspec_simple_job_with_retry "--pattern \"${pattern}\""
 }
 
 function cleanup_individual_job_reports() {
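The retry flow now relies on the exit-code contract added to rspec_args (`--failure-exit-code 1`, `--error-exit-code 2`). The resulting behaviour of handle_retry_rspec_in_new_process, summarised:

    # status 0 -> "No examples to retry, congrats!"; exits 0
    # status 1 -> example failures: retried in a new process when
    #             RETRY_FAILED_TESTS_IN_NEW_PROCESS == "true"; exits with the retry status
    # status 2 -> errors outside the examples (e.g. a crash while loading); never retried, exits 2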
diff --git a/scripts/trigger-build.rb b/scripts/trigger-build.rb
index 033c2e55329..69eea7488fb 100755
--- a/scripts/trigger-build.rb
+++ b/scripts/trigger-build.rb
@@ -9,10 +9,6 @@ module Trigger
     %w[gitlab gitlab-ee].include?(ENV['CI_PROJECT_NAME'])
   end
 
-  def self.security?
-    %r{\Agitlab-org/security(\z|/)}.match?(ENV['CI_PROJECT_NAMESPACE'])
-  end
-
   def self.non_empty_variable_value(variable)
     variable_value = ENV[variable]
 
@@ -30,10 +26,10 @@ module Trigger
   class Base
     # Can be overridden
     def self.access_token
-      ENV['GITLAB_BOT_MULTI_PROJECT_PIPELINE_POLLING_TOKEN']
+      ENV['PROJECT_TOKEN_FOR_CI_SCRIPTS_API_USAGE']
     end
 
-    def invoke!(downstream_job_name: nil)
+    def invoke!
       pipeline_variables = variables
 
       puts "Triggering downstream pipeline on #{downstream_project_path}"
@@ -48,18 +44,7 @@ module Trigger
       puts "Triggered downstream pipeline: #{pipeline.web_url}\n"
       puts "Waiting for downstream pipeline status"
 
-      downstream_job =
-        if downstream_job_name
-          downstream_client.pipeline_jobs(downstream_project_path, pipeline.id).auto_paginate.find do |potential_job|
-            potential_job.name == downstream_job_name
-          end
-        end
-
-      if downstream_job
-        Trigger::Job.new(downstream_project_path, downstream_job.id, downstream_client)
-      else
-        Trigger::Pipeline.new(downstream_project_path, pipeline.id, downstream_client)
-      end
+      Trigger::Pipeline.new(downstream_project_path, pipeline.id, downstream_client)
     end
 
     def variables
@@ -221,6 +206,11 @@ module Trigger
     end
   end
 
+  # This is used in:
+  # - https://gitlab.com/gitlab-org/gitlab-runner/-/blob/ddaf90761c917a42ed4aab60541b6bc33871fe68/.gitlab/ci/docs.gitlab-ci.yml#L1-47
+  # - https://gitlab.com/gitlab-org/charts/gitlab/-/blob/fa348e709e901196803051669b4874b657b4ea91/.gitlab-ci.yml#L497-543
+  # - https://gitlab.com/gitlab-org/omnibus-gitlab/-/blob/b44483f05c5e22628ba3b49ec4c7f8761c688af0/gitlab-ci-config/gitlab-com.yml#L199-224
+  # - https://gitlab.com/gitlab-org/omnibus-gitlab/-/blob/b44483f05c5e22628ba3b49ec4c7f8761c688af0/gitlab-ci-config/gitlab-com.yml#L356-380
   class Docs < Base
     def self.access_token
       # Default to "DOCS_PROJECT_API_TOKEN" at https://gitlab.com/gitlab-org/gitlab-docs/-/settings/access_tokens
@@ -321,7 +311,7 @@ module Trigger
   class DatabaseTesting < Base
     IDENTIFIABLE_NOTE_TAG = 'gitlab-org/database-team/gitlab-com-database-testing:identifiable-note'
 
-    def invoke!(downstream_job_name: nil)
+    def invoke!
      pipeline = super
      project_path = variables['TOP_UPSTREAM_SOURCE_PROJECT']
      merge_request_id = variables['TOP_UPSTREAM_MERGE_REQUEST_IID']
@@ -438,14 +428,10 @@ module Trigger
 
     attr_reader :project, :gitlab_client, :start_time
   end
-
-  Job = Class.new(Pipeline)
 end
 
 if $PROGRAM_NAME == __FILE__
   case ARGV[0]
-  when 'cng'
-    Trigger::CNG.new.invoke!.wait!
   when 'gitlab-com-database-testing'
     Trigger::DatabaseTesting.new.invoke!
   when 'docs'
@@ -463,7 +449,6 @@ if $PROGRAM_NAME == __FILE__
   else
     puts "Please provide a valid option:
     omnibus - Triggers a pipeline that builds the omnibus-gitlab package
-    cng - Triggers a pipeline that builds images used by the GitLab helm chart
     gitlab-com-database-testing - Triggers a pipeline that tests database changes on GitLab.com data"
   end
 end
diff --git a/scripts/utils.sh b/scripts/utils.sh
index c71de666ac6..55005d0abff 100644
--- a/scripts/utils.sh
+++ b/scripts/utils.sh
@@ -10,6 +10,28 @@ function retry() {
       return 0
     fi
   done
+
+  return 1
+}
+
+# Retry after 2s, 4s, 8s, 16s, 32, 64s, 128s
+function retry_exponential() {
+  if eval "$@"; then
+    return 0
+  fi
+
+  local sleep_time=0
+  # The last try will be after 2**7 = 128 seconds (2min8s)
+  for i in 1 2 3 4 5 6 7; do
+    sleep_time=$((2 ** i))
+
+    echo "Sleep for $sleep_time seconds..."
+    sleep $sleep_time
+    echo "[$(date '+%H:%M:%S')] Attempt #$i..."
+
+    if eval "$@"; then
+      return 0
+    fi
+  done
 
   return 1
 }
@@ -90,7 +112,7 @@ function setup_db_praefect() {
 
 function setup_db() {
   run_timed_command "setup_db_user_only"
-  run_timed_command_with_metric "bundle exec rake db:drop db:create db:schema:load db:migrate" "setup_db"
+  run_timed_command_with_metric "bundle exec rake db:drop db:create db:schema:load db:migrate gitlab:db:lock_writes" "setup_db"
   run_timed_command "setup_db_praefect"
 }
 
@@ -103,6 +125,10 @@ function install_tff_gem() {
   run_timed_command "gem install test_file_finder --no-document --version 0.1.4"
 }
 
+function install_activesupport_gem() {
+  run_timed_command "gem install activesupport --no-document --version 6.1.7.1"
+}
+
 function install_junit_merge_gem() {
   run_timed_command "gem install junit_merge --no-document --version 0.1.2"
 }
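Usage of `retry_exponential` mirrors the existing `retry` helper: one immediate attempt plus up to seven retries, sleeping 2+4+8+16+32+64+128 = 254 seconds in the worst case. For example (the command is illustrative):

    retry_exponential "curl --silent --fail http://gitlab.test/-/readiness"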
diff --git a/scripts/verify-tff-mapping b/scripts/verify-tff-mapping
index 08d9d7a33fd..872f8dae86e 100755
--- a/scripts/verify-tff-mapping
+++ b/scripts/verify-tff-mapping
@@ -29,12 +29,6 @@ tests = [
   },
 
   {
-    explanation: 'Some EE extensions have specs placement that do not follow the recommendation: https://docs.gitlab.com/ee/development/ee_features.html#testing-ee-features-based-on-ce-features. `tff` should still find these misplaced specs.',
-    source: 'ee/app/models/ee/project.rb',
-    expected: ['ee/spec/models/project_spec.rb', 'spec/models/project_spec.rb']
-  },
-
-  {
     explanation: 'EE lib should map to respective spec.',
     source: 'ee/lib/world.rb',
     expected: ['ee/spec/lib/world_spec.rb']
@@ -84,8 +78,8 @@ tests = [
 
   {
     explanation: 'Migration should map to its non-timestamped spec',
-    source: 'db/migrate/20210818220234_add_default_project_approval_rules_vuln_allowed.rb',
-    expected: ['spec/migrations/add_default_project_approval_rules_vuln_allowed_spec.rb']
+    source: 'db/migrate/20221014034338_populate_releases_access_level_from_repository.rb',
+    expected: ['spec/migrations/populate_releases_access_level_from_repository_spec.rb']
   },
 
   {
@@ -127,13 +121,13 @@ tests = [
   {
     explanation: 'FOSS factory should map to factories spec',
     source: 'spec/factories/users.rb',
-    expected: ['spec/models/factories_spec.rb']
+    expected: ['ee/spec/models/factories_spec.rb']
   },
 
   {
     explanation: 'EE factory should map to factories spec',
     source: 'ee/spec/factories/users.rb',
-    expected: ['spec/models/factories_spec.rb']
+    expected: ['ee/spec/models/factories_spec.rb']
   },
 
   {