author    GitLab Bot <gitlab-bot@gitlab.com>  2023-03-29 09:10:35 +0000
committer GitLab Bot <gitlab-bot@gitlab.com>  2023-03-29 09:10:35 +0000
commit    55dea17c8d10d1b25cd25f1cdaeaed64332995a7 (patch)
tree      d8deec499e9aacae38b95cb509f9dc7873b0652a /vendor
parent    f0224e32ca39fe7b261a8d6bcec64bb449c65856 (diff)
download  gitlab-ce-55dea17c8d10d1b25cd25f1cdaeaed64332995a7.tar.gz
Add latest changes from gitlab-org/gitlab@master
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/.gitignore | 3
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/.gitlab-ci.yml | 77
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/.rspec | 1
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/CONTRIBUTING.md | 41
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/Gemfile | 14
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/Gemfile.lock | 58
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/LICENSE | 165
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/README.md | 57
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/gitlab-sidekiq-fetcher.gemspec | 15
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/lib/sidekiq-reliable-fetch.rb | 6
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/base_reliable_fetch.rb | 269
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/interrupted_set.rb | 51
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/reliable_fetch.rb | 38
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/semi_reliable_fetch.rb | 48
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/spec/base_reliable_fetch_spec.rb | 97
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/spec/fetch_shared_examples.rb | 195
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/spec/reliable_fetch_spec.rb | 8
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/spec/semi_reliable_fetch_spec.rb | 43
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/spec/spec_helper.rb | 116
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/tests/README.md | 37
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/tests/interruption/config.rb | 19
-rwxr-xr-x  vendor/gems/sidekiq-reliable-fetch/tests/interruption/test_kill_signal.rb | 25
-rwxr-xr-x  vendor/gems/sidekiq-reliable-fetch/tests/interruption/test_term_signal.rb | 25
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/tests/interruption/worker.rb | 15
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/tests/reliability/config.rb | 29
-rwxr-xr-x  vendor/gems/sidekiq-reliable-fetch/tests/reliability/reliability_test.rb | 115
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/tests/reliability/worker.rb | 14
-rw-r--r--  vendor/gems/sidekiq-reliable-fetch/tests/support/utils.rb | 26
28 files changed, 1607 insertions, 0 deletions
diff --git a/vendor/gems/sidekiq-reliable-fetch/.gitignore b/vendor/gems/sidekiq-reliable-fetch/.gitignore
new file mode 100644
index 00000000000..f155298bb0a
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/.gitignore
@@ -0,0 +1,3 @@
+*.gem
+coverage
+.DS_Store
diff --git a/vendor/gems/sidekiq-reliable-fetch/.gitlab-ci.yml b/vendor/gems/sidekiq-reliable-fetch/.gitlab-ci.yml
new file mode 100644
index 00000000000..b87a454bccc
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/.gitlab-ci.yml
@@ -0,0 +1,77 @@
+workflow:
+ rules:
+ - if: $CI_MERGE_REQUEST_ID
+
+default:
+ image: ruby:3.0
+
+before_script:
+ - cd vendor/gems/sidekiq-reliable-fetch
+ - ruby -v
+ - which ruby
+ - gem install bundler
+ - bundle config set --local path 'vendor' # Install dependencies into ./vendor/ruby
+ - bundle config set with 'development' # This is set to 'deployment' otherwise
+ - bundle config set --local frozen 'true' # Disallow Gemfile.lock changes on CI
+ - bundle config # Show bundler configuration
+ - bundle install --jobs $(nproc) "${FLAGS[@]}"
+
+variables:
+ REDIS_URL: "redis://redis"
+
+rspec:
+ stage: test
+ coverage: '/LOC \((\d+\.\d+%)\) covered.$/'
+ script:
+ - bundle exec rspec
+ services:
+ - redis:alpine
+ artifacts:
+ expire_in: 31d
+ when: always
+ paths:
+ - coverage/
+
+.integration:
+ stage: test
+ script:
+ - cd tests/reliability
+ - bundle exec ruby reliability_test.rb
+ services:
+ - redis:alpine
+
+integration_semi:
+ extends: .integration
+ variables:
+ JOB_FETCHER: semi
+
+integration_reliable:
+ extends: .integration
+ variables:
+ JOB_FETCHER: reliable
+
+integration_basic:
+ extends: .integration
+ allow_failure: yes
+ variables:
+ JOB_FETCHER: basic
+
+kill_interruption:
+ stage: test
+ script:
+ - cd tests/interruption
+ - bundle exec ruby test_kill_signal.rb
+ services:
+ - redis:alpine
+
+term_interruption:
+ stage: test
+ script:
+ - cd tests/interruption
+ - bundle exec ruby test_term_signal.rb
+ services:
+ - redis:alpine
+
+# rubocop:
+# script:
+# - bundle exec rubocop
diff --git a/vendor/gems/sidekiq-reliable-fetch/.rspec b/vendor/gems/sidekiq-reliable-fetch/.rspec
new file mode 100644
index 00000000000..c99d2e7396e
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/.rspec
@@ -0,0 +1 @@
+--require spec_helper
diff --git a/vendor/gems/sidekiq-reliable-fetch/CONTRIBUTING.md b/vendor/gems/sidekiq-reliable-fetch/CONTRIBUTING.md
new file mode 100644
index 00000000000..e9c2788bebe
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/CONTRIBUTING.md
@@ -0,0 +1,41 @@
+## Developer Certificate of Origin and License
+
+By contributing to GitLab B.V., you accept and agree to the following terms and
+conditions for your present and future contributions submitted to GitLab B.V.
+Except for the license granted herein to GitLab B.V. and recipients of software
+distributed by GitLab B.V., you reserve all right, title, and interest in and to
+your Contributions.
+
+All contributions are subject to the Developer Certificate of Origin and license set out at [docs.gitlab.com/ce/legal/developer_certificate_of_origin](https://docs.gitlab.com/ce/legal/developer_certificate_of_origin).
+
+_This notice should stay as the first item in the CONTRIBUTING.md file._
+
+## Code of conduct
+
+As contributors and maintainers of this project, we pledge to respect all people
+who contribute through reporting issues, posting feature requests, updating
+documentation, submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project a harassment-free
+experience for everyone, regardless of level of experience, gender, gender
+identity and expression, sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, or religion.
+
+Examples of unacceptable behavior by participants include the use of sexual
+language or imagery, derogatory comments or personal attacks, trolling, public
+or private harassment, insults, or other unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct. Project maintainers who do not follow the
+Code of Conduct may be removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior can be
+reported by emailing contact@gitlab.com.
+
+This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.1.0,
+available at [https://contributor-covenant.org/version/1/1/0/](https://contributor-covenant.org/version/1/1/0/).
+
diff --git a/vendor/gems/sidekiq-reliable-fetch/Gemfile b/vendor/gems/sidekiq-reliable-fetch/Gemfile
new file mode 100644
index 00000000000..3bed294f56f
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/Gemfile
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+source "https://rubygems.org"
+
+git_source(:github) { |repo_name| "https://github.com/#{repo_name}" }
+
+gemspec
+
+group :test do
+ gem "rspec", '~> 3'
+ gem "pry"
+ gem 'simplecov', require: false
+ gem 'stub_env', '~> 1.0'
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/Gemfile.lock b/vendor/gems/sidekiq-reliable-fetch/Gemfile.lock
new file mode 100644
index 00000000000..57767ee8c3b
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/Gemfile.lock
@@ -0,0 +1,58 @@
+PATH
+ remote: .
+ specs:
+ gitlab-sidekiq-fetcher (0.9.0)
+ json (>= 2.5)
+ sidekiq (~> 6.1)
+
+GEM
+ remote: https://rubygems.org/
+ specs:
+ coderay (1.1.2)
+ connection_pool (2.4.0)
+ diff-lcs (1.3)
+ docile (1.3.1)
+ json (2.5.1)
+ method_source (0.9.0)
+ pry (0.11.3)
+ coderay (~> 1.1.0)
+ method_source (~> 0.9.0)
+ rack (2.2.6.4)
+ redis (4.8.1)
+ rspec (3.8.0)
+ rspec-core (~> 3.8.0)
+ rspec-expectations (~> 3.8.0)
+ rspec-mocks (~> 3.8.0)
+ rspec-core (3.8.0)
+ rspec-support (~> 3.8.0)
+ rspec-expectations (3.8.1)
+ diff-lcs (>= 1.2.0, < 2.0)
+ rspec-support (~> 3.8.0)
+ rspec-mocks (3.8.0)
+ diff-lcs (>= 1.2.0, < 2.0)
+ rspec-support (~> 3.8.0)
+ rspec-support (3.8.0)
+ sidekiq (6.5.8)
+ connection_pool (>= 2.2.5, < 3)
+ rack (~> 2.0)
+ redis (>= 4.5.0, < 5)
+ simplecov (0.16.1)
+ docile (~> 1.1)
+ json (>= 1.8, < 3)
+ simplecov-html (~> 0.10.0)
+ simplecov-html (0.10.2)
+ stub_env (1.0.4)
+ rspec (>= 2.0, < 4.0)
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ gitlab-sidekiq-fetcher!
+ pry
+ rspec (~> 3)
+ simplecov
+ stub_env (~> 1.0)
+
+BUNDLED WITH
+ 2.3.24
diff --git a/vendor/gems/sidekiq-reliable-fetch/LICENSE b/vendor/gems/sidekiq-reliable-fetch/LICENSE
new file mode 100644
index 00000000000..65c5ca88a67
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/LICENSE
@@ -0,0 +1,165 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/vendor/gems/sidekiq-reliable-fetch/README.md b/vendor/gems/sidekiq-reliable-fetch/README.md
new file mode 100644
index 00000000000..4c7029e3955
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/README.md
@@ -0,0 +1,57 @@
+gitlab-sidekiq-fetcher
+======================
+
+`gitlab-sidekiq-fetcher` is an extension to Sidekiq that adds support for reliable
+fetches from Redis.
+
+It's based on https://github.com/TEA-ebook/sidekiq-reliable-fetch.
+
+**IMPORTANT NOTE:** Since version `0.7.0` this gem works only with `sidekiq >= 6.1` (which introduced breaking changes to the Fetch API). Please use version `~> 0.5` if you use an older version of `sidekiq`.
+
+**UPGRADE NOTE:** If upgrading from 0.7.0, strongly consider a full deployment of 0.7.1 before moving to 0.8.0; 0.7.1 fixes a bug in the queue name validation that is triggered when Sidekiq nodes running 0.7.0 see working queues named by 0.8.0. See https://gitlab.com/gitlab-org/sidekiq-reliable-fetch/-/merge_requests/22
+
+There are two strategies implemented: [Reliable fetch](http://redis.io/commands/rpoplpush#pattern-reliable-queue), which uses the `rpoplpush` command, and
+semi-reliable fetch, which uses regular `brpop` and `lpush` to pick the job and put it on the working queue. The main benefit of the "reliable" strategy is that `rpoplpush` is atomic, eliminating a race condition in which jobs can be lost.
+However, it comes at a cost: `rpoplpush` can't watch multiple lists at the same time, so we need to iterate over the entire queue list, which significantly increases pressure on Redis when there are more than a few queues. The "semi-reliable" strategy is still much more reliable than the default Sidekiq fetcher, and, compared to the reliable fetch strategy, it does not significantly increase pressure on Redis.
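+
+For illustration only, the core Redis operations behind the two strategies boil down to the sketch below. It is not part of the gem's API, and the queue and worker names (`queue:default`, `worker-1`) are simplified placeholders; the real implementation derives the working queue key from the process identity:
+
+```ruby
+require 'redis'
+
+redis = Redis.new
+
+# Reliable fetch: atomically move a job from the queue to a per-process
+# working queue. If the worker dies, the job remains in the working queue
+# and can be requeued by the cleanup process.
+job = redis.rpoplpush('queue:default', 'working:queue:default:worker-1')
+
+# Semi-reliable fetch: block on several queues at once, then push the job
+# to the working queue in a separate (non-atomic) step.
+queue, job = redis.brpop('queue:default', 'queue:low', timeout: 2)
+redis.lpush("working:#{queue}:worker-1", job) if job
+```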
+
+### Interruption handling
+
+Sidekiq expects every job to report success or failure. In the failure case, Sidekiq stores a `retry_count` counter
+in the job and keeps re-running it until the counter reaches the maximum allowed value. When a job never gets the
+chance to report success or failure (for example, because it was killed forcibly, or because it was requeued after receiving a TERM signal), the standard retry mechanism does not kick in and the job would be retried indefinitely. This is why the reliable fetcher maintains a special counter, `interrupted_count`,
+which is used to limit the number of such retries. In both cases, the reliable fetcher increments `interrupted_count` and rejects the job from running again once the counter exceeds `max_retries_after_interruption` (default: 3).
+Such a job is put into the `interrupted` queue. This queue behaves much like the Sidekiq Dead queue, so it only stores a limited number of jobs for a limited time. As with the Dead queue, the limits are configurable via the `interrupted_max_jobs` (default: 10_000) and `interrupted_timeout_in_seconds` (default: 3 months) Sidekiq option keys.
+
+You can also disable special handling of interrupted jobs by setting `max_retries_after_interruption` to `-1`.
+In this case, interrupted jobs are retried without any limits from the reliable fetcher and they are never put into the interrupted queue.
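+
+As a sketch of how these options can be set (the exact values here are illustrative, not recommendations), the retry and interrupted-queue limits are plain Sidekiq option keys:
+
+```ruby
+Sidekiq.configure_server do |config|
+  config[:max_retries_after_interruption] = 5                  # allow more interruptions before quarantining
+  config[:interrupted_max_jobs] = 5_000                        # cap the size of the interrupted queue
+  config[:interrupted_timeout_in_seconds] = 30 * 24 * 60 * 60  # keep interrupted jobs for 30 days
+
+  Sidekiq::ReliableFetch.setup_reliable_fetch!(config)
+end
+```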
+
+
+## Installation
+
+This gem is vendored in the GitLab Rails application and new versions are not published to RubyGems.
+
+## Configuration
+
+Enable reliable fetches by calling this gem from your Sidekiq configuration:
+
+```ruby
+Sidekiq.configure_server do |config|
+ Sidekiq::ReliableFetch.setup_reliable_fetch!(config)
+
+ # …
+end
+```
+
+There is an additional parameter, `config[:semi_reliable_fetch]`, that you can use to switch between the two strategies:
+
+```ruby
+Sidekiq.configure_server do |config|
+ config[:semi_reliable_fetch] = true # Default value is false
+
+ Sidekiq::ReliableFetch.setup_reliable_fetch!(config)
+end
+```
+
+## License
+
+LGPL-3.0, see the LICENSE file.
diff --git a/vendor/gems/sidekiq-reliable-fetch/gitlab-sidekiq-fetcher.gemspec b/vendor/gems/sidekiq-reliable-fetch/gitlab-sidekiq-fetcher.gemspec
new file mode 100644
index 00000000000..0d0e5e3f6fa
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/gitlab-sidekiq-fetcher.gemspec
@@ -0,0 +1,15 @@
+Gem::Specification.new do |s|
+ s.name = 'gitlab-sidekiq-fetcher'
+ s.version = '0.9.0'
+ s.authors = ['TEA', 'GitLab']
+ s.email = 'valery@gitlab.com'
+ s.license = 'LGPL-3.0'
+ s.homepage = 'https://gitlab.com/gitlab-org/gitlab/-/tree/master/vendor/gems/sidekiq-reliable-fetch'
+ s.summary = 'Reliable fetch extension for Sidekiq'
+ s.description = 'Redis reliable queue pattern implemented in Sidekiq'
+ s.require_paths = ['lib']
+ s.files = Dir.glob('lib/**/*.*')
+ s.test_files = Dir.glob('{spec,tests}/**/*.*')
+ s.add_dependency 'sidekiq', '~> 6.1'
+ s.add_runtime_dependency 'json', '>= 2.5'
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq-reliable-fetch.rb b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq-reliable-fetch.rb
new file mode 100644
index 00000000000..df44fabaedd
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq-reliable-fetch.rb
@@ -0,0 +1,6 @@
+require 'sidekiq'
+require 'sidekiq/api'
+
+require_relative 'sidekiq/base_reliable_fetch'
+require_relative 'sidekiq/reliable_fetch'
+require_relative 'sidekiq/semi_reliable_fetch'
diff --git a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/base_reliable_fetch.rb b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/base_reliable_fetch.rb
new file mode 100644
index 00000000000..e9c9f050982
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/base_reliable_fetch.rb
@@ -0,0 +1,269 @@
+# frozen_string_literal: true
+
+require_relative 'interrupted_set'
+
+module Sidekiq
+ class BaseReliableFetch
+ DEFAULT_CLEANUP_INTERVAL = 60 * 60 # 1 hour
+ HEARTBEAT_INTERVAL = 20 # seconds
+ HEARTBEAT_LIFESPAN = 60 # seconds
+ HEARTBEAT_RETRY_DELAY = 1 # seconds
+ WORKING_QUEUE_PREFIX = 'working'
+
+ # Defines how often we try to take a lease to not flood our
+ # Redis server with SET requests
+ DEFAULT_LEASE_INTERVAL = 2 * 60 # seconds
+ LEASE_KEY = 'reliable-fetcher-cleanup-lock'
+
+ # Defines the COUNT parameter that will be passed to Redis SCAN command
+ SCAN_COUNT = 1000
+
+ # How many times a job can be interrupted before it stops being retried
+ DEFAULT_MAX_RETRIES_AFTER_INTERRUPTION = 3
+
+ # Regexes for matching working queue keys
+ WORKING_QUEUE_REGEX = /#{WORKING_QUEUE_PREFIX}:(queue:.*):([^:]*:[0-9]*:[0-9a-f]*)\z/.freeze
+ LEGACY_WORKING_QUEUE_REGEX = /#{WORKING_QUEUE_PREFIX}:(queue:.*):([^:]*:[0-9]*)\z/.freeze
+
+ UnitOfWork = Struct.new(:queue, :job) do
+ def acknowledge
+ Sidekiq.redis { |conn| conn.lrem(Sidekiq::BaseReliableFetch.working_queue_name(queue), 1, job) }
+ end
+
+ def queue_name
+ queue.sub(/.*queue:/, '')
+ end
+
+ def requeue
+ Sidekiq.redis do |conn|
+ conn.multi do |multi|
+ multi.lpush(queue, job)
+ multi.lrem(Sidekiq::BaseReliableFetch.working_queue_name(queue), 1, job)
+ end
+ end
+ end
+ end
+
+ def self.setup_reliable_fetch!(config)
+ config = config.options unless config.respond_to?(:[])
+
+ fetch_strategy = if config[:semi_reliable_fetch]
+ Sidekiq::SemiReliableFetch
+ else
+ Sidekiq::ReliableFetch
+ end
+
+ config[:fetch] = fetch_strategy.new(config)
+
+ Sidekiq.logger.info('GitLab reliable fetch activated!')
+
+ start_heartbeat_thread
+ end
+
+ def self.start_heartbeat_thread
+ Thread.new do
+ loop do
+ begin
+ heartbeat
+
+ sleep HEARTBEAT_INTERVAL
+ rescue => e
+ Sidekiq.logger.error("Heartbeat thread error: #{e.message}")
+
+ sleep HEARTBEAT_RETRY_DELAY
+ end
+ end
+ end
+ end
+
+ def self.hostname
+ Socket.gethostname
+ end
+
+ def self.process_nonce
+ @@process_nonce ||= SecureRandom.hex(6)
+ end
+
+ def self.identity
+ @@identity ||= "#{hostname}:#{$$}:#{process_nonce}"
+ end
+
+ def self.heartbeat
+ Sidekiq.redis do |conn|
+ conn.set(heartbeat_key(identity), 1, ex: HEARTBEAT_LIFESPAN)
+ end
+
+ Sidekiq.logger.debug("Heartbeat for #{identity}")
+ end
+
+ def self.worker_dead?(identity, conn)
+ !conn.get(heartbeat_key(identity))
+ end
+
+ def self.heartbeat_key(identity)
+ "reliable-fetcher-heartbeat-#{identity.gsub(':', '-')}"
+ end
+
+ def self.working_queue_name(queue)
+ "#{WORKING_QUEUE_PREFIX}:#{queue}:#{identity}"
+ end
+
+ attr_reader :cleanup_interval, :last_try_to_take_lease_at, :lease_interval,
+ :queues, :use_semi_reliable_fetch,
+ :strictly_ordered_queues
+
+ def initialize(options)
+ raise ArgumentError, 'missing queue list' unless options[:queues]
+
+ @config = options
+ @interrupted_set = Sidekiq::InterruptedSet.new
+ @cleanup_interval = options.fetch(:cleanup_interval, DEFAULT_CLEANUP_INTERVAL)
+ @lease_interval = options.fetch(:lease_interval, DEFAULT_LEASE_INTERVAL)
+ @last_try_to_take_lease_at = 0
+ @strictly_ordered_queues = !!options[:strict]
+ @queues = options[:queues].map { |q| "queue:#{q}" }
+ end
+
+ def retrieve_work
+ clean_working_queues! if take_lease
+
+ retrieve_unit_of_work
+ end
+
+ def retrieve_unit_of_work
+ raise NotImplementedError,
+ "#{self.class} does not implement #{__method__}"
+ end
+
+ def bulk_requeue(inprogress, _options)
+ return if inprogress.empty?
+
+ Sidekiq.redis do |conn|
+ inprogress.each do |unit_of_work|
+ conn.multi do |multi|
+ preprocess_interrupted_job(unit_of_work.job, unit_of_work.queue, multi)
+
+ multi.lrem(self.class.working_queue_name(unit_of_work.queue), 1, unit_of_work.job)
+ end
+ end
+ end
+ rescue => e
+ Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{e.message}")
+ end
+
+ private
+
+ def preprocess_interrupted_job(job, queue, conn = nil)
+ msg = Sidekiq.load_json(job)
+ msg['interrupted_count'] = msg['interrupted_count'].to_i + 1
+
+ if interruption_exhausted?(msg)
+ send_to_quarantine(msg, conn)
+ else
+ requeue_job(queue, msg, conn)
+ end
+ end
+
+ # If you want this method to run within the scope of a MULTI connection,
+ # pass that connection in explicitly
+ def requeue_job(queue, msg, conn)
+ with_connection(conn) do |conn|
+ conn.lpush(queue, Sidekiq.dump_json(msg))
+ end
+
+ Sidekiq.logger.info(
+ message: "Pushed job #{msg['jid']} back to queue #{queue}",
+ jid: msg['jid'],
+ queue: queue
+ )
+ end
+
+ def extract_queue_and_identity(key)
+ # New identity format is "{hostname}:{pid}:{randomhex}"
+ # Old identity format is "{hostname}:{pid}"
+ # Queue names may also have colons (namespaced).
+ # Expressing this in a single regex is unreadable
+
+ # Test the newer expected format first, only checking the older if necessary
+ original_queue, identity = key.scan(WORKING_QUEUE_REGEX).flatten
+ return original_queue, identity unless original_queue.nil? || identity.nil?
+
+ key.scan(LEGACY_WORKING_QUEUE_REGEX).flatten
+ end
+
+ # Detect "old" jobs and requeue them because the worker they were assigned
+ # to probably failed miserably.
+ def clean_working_queues!
+ Sidekiq.logger.info('Cleaning working queues')
+
+ Sidekiq.redis do |conn|
+ conn.scan_each(match: "#{WORKING_QUEUE_PREFIX}:queue:*", count: SCAN_COUNT) do |key|
+ original_queue, identity = extract_queue_and_identity(key)
+
+ next if original_queue.nil? || identity.nil?
+
+ clean_working_queue!(original_queue, key) if self.class.worker_dead?(identity, conn)
+ end
+ end
+ end
+
+ def clean_working_queue!(original_queue, working_queue)
+ Sidekiq.redis do |conn|
+ while job = conn.rpop(working_queue)
+ preprocess_interrupted_job(job, original_queue)
+ end
+ end
+ end
+
+ def interruption_exhausted?(msg)
+ return false if max_retries_after_interruption(msg['class']) < 0
+
+ msg['interrupted_count'].to_i >= max_retries_after_interruption(msg['class'])
+ end
+
+ def max_retries_after_interruption(worker_class)
+ max_retries_after_interruption = nil
+
+ max_retries_after_interruption ||= begin
+ Object.const_get(worker_class).sidekiq_options[:max_retries_after_interruption]
+ rescue NameError
+ end
+
+ max_retries_after_interruption ||= @config[:max_retries_after_interruption]
+ max_retries_after_interruption ||= DEFAULT_MAX_RETRIES_AFTER_INTERRUPTION
+ max_retries_after_interruption
+ end
+
+ def send_to_quarantine(msg, multi_connection = nil)
+ Sidekiq.logger.warn(
+ class: msg['class'],
+ jid: msg['jid'],
+ message: %(Reliable Fetcher: adding dead #{msg['class']} job #{msg['jid']} to interrupted queue)
+ )
+
+ job = Sidekiq.dump_json(msg)
+ @interrupted_set.put(job, connection: multi_connection)
+ end
+
+ # Yields the block with an existing connection or creates a new one
+ def with_connection(conn)
+ return yield(conn) if conn
+
+ Sidekiq.redis { |redis_conn| yield(redis_conn) }
+ end
+
+ def take_lease
+ return unless allowed_to_take_a_lease?
+
+ @last_try_to_take_lease_at = Time.now.to_f
+
+ Sidekiq.redis do |conn|
+ conn.set(LEASE_KEY, 1, nx: true, ex: cleanup_interval)
+ end
+ end
+
+ def allowed_to_take_a_lease?
+ Time.now.to_f - last_try_to_take_lease_at > lease_interval
+ end
+ end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/interrupted_set.rb b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/interrupted_set.rb
new file mode 100644
index 00000000000..2fc7a10f9d0
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/interrupted_set.rb
@@ -0,0 +1,51 @@
+require 'sidekiq/api'
+
+module Sidekiq
+ class InterruptedSet < ::Sidekiq::JobSet
+ DEFAULT_MAX_CAPACITY = 10_000
+ DEFAULT_MAX_TIMEOUT = 90 * 24 * 60 * 60 # 3 months
+
+ def initialize
+ super "interrupted"
+ end
+
+ def put(message, opts = {})
+ now = Time.now.to_f
+
+ with_multi_connection(opts[:connection]) do |conn|
+ conn.zadd(name, now.to_s, message)
+ conn.zremrangebyscore(name, '-inf', now - self.class.timeout)
+ conn.zremrangebyrank(name, 0, - self.class.max_jobs)
+ end
+
+ true
+ end
+
+ # Yields the block inside an existing MULTI connection or creates a new one
+ def with_multi_connection(conn, &block)
+ return yield(conn) if conn
+
+ Sidekiq.redis do |c|
+ c.multi do |multi|
+ yield(multi)
+ end
+ end
+ end
+
+ def retry_all
+ each(&:retry) while size > 0
+ end
+
+ def self.max_jobs
+ options[:interrupted_max_jobs] || DEFAULT_MAX_CAPACITY
+ end
+
+ def self.timeout
+ options[:interrupted_timeout_in_seconds] || DEFAULT_MAX_TIMEOUT
+ end
+
+ def self.options
+ Sidekiq.respond_to?(:[]) ? Sidekiq : Sidekiq.options
+ end
+ end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/reliable_fetch.rb b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/reliable_fetch.rb
new file mode 100644
index 00000000000..e3088ab70a4
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/reliable_fetch.rb
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module Sidekiq
+ class ReliableFetch < BaseReliableFetch
+ # For reliable fetch we don't use Redis' blocking operations so
+ # we inject a regular sleep into the loop.
+ RELIABLE_FETCH_IDLE_TIMEOUT = 5 # seconds
+
+ attr_reader :queues_size
+
+ def initialize(options)
+ super
+
+ @queues = queues.uniq if strictly_ordered_queues
+ @queues_size = queues.size
+ end
+
+ private
+
+ def retrieve_unit_of_work
+ queues_list = strictly_ordered_queues ? queues : queues.shuffle
+
+ queues_list.each do |queue|
+ work = Sidekiq.redis do |conn|
+ conn.rpoplpush(queue, self.class.working_queue_name(queue))
+ end
+
+ return UnitOfWork.new(queue, work) if work
+ end
+
+ # We didn't find a job in any of the configured queues. Let's sleep a bit
+ # to avoid uselessly burning too much CPU
+ sleep(RELIABLE_FETCH_IDLE_TIMEOUT)
+
+ nil
+ end
+ end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/semi_reliable_fetch.rb b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/semi_reliable_fetch.rb
new file mode 100644
index 00000000000..5b8a601dde1
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/semi_reliable_fetch.rb
@@ -0,0 +1,48 @@
+# frozen_string_literal: true
+
+module Sidekiq
+ class SemiReliableFetch < BaseReliableFetch
+ # We want the fetch operation to timeout every few seconds so the thread
+ # can check if the process is shutting down. This constant is only used
+ # for semi-reliable fetch.
+ DEFAULT_SEMI_RELIABLE_FETCH_TIMEOUT = 2 # seconds
+
+ def initialize(options)
+ super
+
+ if strictly_ordered_queues
+ @queues = @queues.uniq
+ @queues << { timeout: semi_reliable_fetch_timeout }
+ end
+ end
+
+ private
+
+ def retrieve_unit_of_work
+ work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd) }
+ return unless work
+
+ unit_of_work = UnitOfWork.new(*work)
+
+ Sidekiq.redis do |conn|
+ conn.lpush(self.class.working_queue_name(unit_of_work.queue), unit_of_work.job)
+ end
+
+ unit_of_work
+ end
+
+ def queues_cmd
+ if strictly_ordered_queues
+ @queues
+ else
+ queues = @queues.shuffle.uniq
+ queues << { timeout: semi_reliable_fetch_timeout }
+ queues
+ end
+ end
+
+ def semi_reliable_fetch_timeout
+ @semi_reliable_fetch_timeout ||= ENV['SIDEKIQ_SEMI_RELIABLE_FETCH_TIMEOUT']&.to_i || DEFAULT_SEMI_RELIABLE_FETCH_TIMEOUT
+ end
+ end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/base_reliable_fetch_spec.rb b/vendor/gems/sidekiq-reliable-fetch/spec/base_reliable_fetch_spec.rb
new file mode 100644
index 00000000000..cdc4409f0d5
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/base_reliable_fetch_spec.rb
@@ -0,0 +1,97 @@
+require 'spec_helper'
+require 'fetch_shared_examples'
+require 'sidekiq/base_reliable_fetch'
+require 'sidekiq/reliable_fetch'
+require 'sidekiq/semi_reliable_fetch'
+
+describe Sidekiq::BaseReliableFetch do
+ let(:job) { Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo']) }
+
+ before { Sidekiq.redis(&:flushdb) }
+
+ describe 'UnitOfWork' do
+ let(:fetcher) { Sidekiq::ReliableFetch.new(queues: ['foo']) }
+
+ describe '#requeue' do
+ it 'requeues job' do
+ Sidekiq.redis { |conn| conn.rpush('queue:foo', job) }
+
+ uow = fetcher.retrieve_work
+
+ uow.requeue
+
+ expect(Sidekiq::Queue.new('foo').size).to eq 1
+ expect(working_queue_size('foo')).to eq 0
+ end
+ end
+
+ describe '#acknowledge' do
+ it 'acknowledges job' do
+ Sidekiq.redis { |conn| conn.rpush('queue:foo', job) }
+
+ uow = fetcher.retrieve_work
+
+ expect { uow.acknowledge }
+ .to change { working_queue_size('foo') }.by(-1)
+
+ expect(Sidekiq::Queue.new('foo').size).to eq 0
+ end
+ end
+ end
+
+ describe '#bulk_requeue' do
+ let(:options) { { queues: %w[foo bar] } }
+ let!(:queue1) { Sidekiq::Queue.new('foo') }
+ let!(:queue2) { Sidekiq::Queue.new('bar') }
+
+ it 'requeues the bulk' do
+ uow = described_class::UnitOfWork
+ jobs = [ uow.new('queue:foo', job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
+ described_class.new(options).bulk_requeue(jobs, nil)
+
+ expect(queue1.size).to eq 2
+ expect(queue2.size).to eq 1
+ end
+
+ it 'puts jobs into interrupted queue' do
+ uow = described_class::UnitOfWork
+ interrupted_job = Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo'], interrupted_count: 3)
+ jobs = [ uow.new('queue:foo', interrupted_job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
+ described_class.new(options).bulk_requeue(jobs, nil)
+
+ expect(queue1.size).to eq 1
+ expect(queue2.size).to eq 1
+ expect(Sidekiq::InterruptedSet.new.size).to eq 1
+ end
+
+ it 'does not put jobs into interrupted queue if it is disabled' do
+ options[:max_retries_after_interruption] = -1
+
+ uow = described_class::UnitOfWork
+ interrupted_job = Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo'], interrupted_count: 3)
+ jobs = [ uow.new('queue:foo', interrupted_job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
+ described_class.new(options).bulk_requeue(jobs, nil)
+
+ expect(queue1.size).to eq 2
+ expect(queue2.size).to eq 1
+ expect(Sidekiq::InterruptedSet.new.size).to eq 0
+ end
+ end
+
+ it 'sets heartbeat' do
+ config = double(:sidekiq_config, options: { queues: %w[foo bar] })
+
+ heartbeat_thread = described_class.setup_reliable_fetch!(config)
+
+ Sidekiq.redis do |conn|
+ sleep 0.2 # Give the time to heartbeat thread to make a loop
+
+ heartbeat_key = described_class.heartbeat_key(described_class.identity)
+ heartbeat = conn.get(heartbeat_key)
+
+ expect(heartbeat).not_to be_nil
+ end
+
+ heartbeat_thread.kill
+ end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/fetch_shared_examples.rb b/vendor/gems/sidekiq-reliable-fetch/spec/fetch_shared_examples.rb
new file mode 100644
index 00000000000..df7f715f2f9
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/fetch_shared_examples.rb
@@ -0,0 +1,195 @@
+shared_examples 'a Sidekiq fetcher' do
+ let(:queues) { ['assigned'] }
+
+ before { Sidekiq.redis(&:flushdb) }
+
+ describe '#retrieve_work' do
+ let(:job) { Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo']) }
+ let(:fetcher) { described_class.new(queues: queues) }
+
+ it 'does not clean up orphaned jobs more than once per cleanup interval' do
+ Sidekiq.redis = Sidekiq::RedisConnection.create(url: REDIS_URL, size: 10)
+
+ expect(fetcher).to receive(:clean_working_queues!).once
+
+ threads = 10.times.map do
+ Thread.new do
+ fetcher.retrieve_work
+ end
+ end
+
+ threads.map(&:join)
+ end
+
+ it 'retrieves jobs in order when strict ordering is enabled' do
+ fetcher = described_class.new(strict: true, queues: ['first', 'second'])
+
+ Sidekiq.redis do |conn|
+ conn.rpush('queue:first', ['msg3', 'msg2', 'msg1'])
+ conn.rpush('queue:second', 'msg4')
+ end
+
+ jobs = (1..4).map { fetcher.retrieve_work.job }
+
+ expect(jobs).to eq ['msg1', 'msg2', 'msg3', 'msg4']
+ end
+
+ it 'does not starve any queue when queues are not strictly ordered' do
+ fetcher = described_class.new(queues: ['first', 'second'])
+
+ Sidekiq.redis do |conn|
+ conn.rpush('queue:first', (1..200).map { |i| "msg#{i}" })
+ conn.rpush('queue:second', 'this_job_should_not_stuck')
+ end
+
+ jobs = (1..100).map { fetcher.retrieve_work.job }
+
+ expect(jobs).to include 'this_job_should_not_stuck'
+ end
+
+ shared_examples "basic queue handling" do |queue|
+ let (:fetcher) { described_class.new(queues: [queue]) }
+
+ it 'retrieves the job and puts it to working queue' do
+ Sidekiq.redis { |conn| conn.rpush("queue:#{queue}", job) }
+
+ uow = fetcher.retrieve_work
+
+ expect(working_queue_size(queue)).to eq 1
+ expect(uow.queue_name).to eq queue
+ expect(uow.job).to eq job
+ expect(Sidekiq::Queue.new(queue).size).to eq 0
+ end
+
+ it 'does not retrieve a job from foreign queue' do
+ Sidekiq.redis { |conn| conn.rpush("'queue:#{queue}:not", job) }
+ expect(fetcher.retrieve_work).to be_nil
+
+ Sidekiq.redis { |conn| conn.rpush("'queue:not_#{queue}", job) }
+ expect(fetcher.retrieve_work).to be_nil
+
+ Sidekiq.redis { |conn| conn.rpush("'queue:random_name", job) }
+ expect(fetcher.retrieve_work).to be_nil
+ end
+
+ it 'requeues jobs from legacy dead working queue with incremented interrupted_count' do
+ Sidekiq.redis do |conn|
+ conn.rpush(legacy_other_process_working_queue_name(queue), job)
+ end
+
+ expected_job = Sidekiq.load_json(job)
+ expected_job['interrupted_count'] = 1
+ expected_job = Sidekiq.dump_json(expected_job)
+
+ uow = fetcher.retrieve_work
+
+ expect(uow).to_not be_nil
+ expect(uow.job).to eq expected_job
+
+ Sidekiq.redis do |conn|
+ expect(conn.llen(legacy_other_process_working_queue_name(queue))).to eq 0
+ end
+ end
+
+ it 'ignores working queue keys in unknown formats' do
+ # Add a spurious non-numeric char segment at the end; this simulates any other
+ # incorrect form in general
+ malformed_key = "#{other_process_working_queue_name(queue)}:X"
+ Sidekiq.redis do |conn|
+ conn.rpush(malformed_key, job)
+ end
+
+ uow = fetcher.retrieve_work
+
+ Sidekiq.redis do |conn|
+ expect(conn.llen(malformed_key)).to eq 1
+ end
+ end
+
+ it 'requeues jobs from dead working queue with incremented interrupted_count' do
+ Sidekiq.redis do |conn|
+ conn.rpush(other_process_working_queue_name(queue), job)
+ end
+
+ expected_job = Sidekiq.load_json(job)
+ expected_job['interrupted_count'] = 1
+ expected_job = Sidekiq.dump_json(expected_job)
+
+ uow = fetcher.retrieve_work
+
+ expect(uow).to_not be_nil
+ expect(uow.job).to eq expected_job
+
+ Sidekiq.redis do |conn|
+ expect(conn.llen(other_process_working_queue_name(queue))).to eq 0
+ end
+ end
+
+ it 'does not requeue jobs from live working queue' do
+ working_queue = live_other_process_working_queue_name(queue)
+
+ Sidekiq.redis do |conn|
+ conn.rpush(working_queue, job)
+ end
+
+ uow = fetcher.retrieve_work
+
+ expect(uow).to be_nil
+
+ Sidekiq.redis do |conn|
+ expect(conn.llen(working_queue)).to eq 1
+ end
+ end
+ end
+
+ context 'with various queues' do
+ %w[assigned namespace:assigned namespace:deeper:assigned].each do |queue|
+ it_behaves_like "basic queue handling", queue
+ end
+ end
+
+ context 'with short cleanup interval' do
+ let(:short_interval) { 1 }
+ let(:fetcher) { described_class.new(queues: queues, lease_interval: short_interval, cleanup_interval: short_interval) }
+
+ it 'requeues when there is no heartbeat' do
+ Sidekiq.redis { |conn| conn.rpush('queue:assigned', job) }
+ # Use of retrieve_work twice with a sleep ensures we have exercised the
+ # `identity` method to create the working queue key name and that it
+ # matches the patterns used in the cleanup
+ uow = fetcher.retrieve_work
+ sleep(short_interval + 1)
+ uow = fetcher.retrieve_work
+
+ # Will only receive a UnitOfWork if the job was detected as failed and requeued
+ expect(uow).to_not be_nil
+ end
+ end
+ end
+end
+
+def working_queue_size(queue_name)
+ Sidekiq.redis do |c|
+ c.llen(Sidekiq::BaseReliableFetch.working_queue_name("queue:#{queue_name}"))
+ end
+end
+
+def legacy_other_process_working_queue_name(queue)
+ "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{Socket.gethostname}:#{::Process.pid + 1}"
+end
+
+def other_process_working_queue_name(queue)
+ "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{Socket.gethostname}:#{::Process.pid + 1}:#{::SecureRandom.hex(6)}"
+end
+
+def live_other_process_working_queue_name(queue)
+ pid = ::Process.pid + 1
+ hostname = Socket.gethostname
+ nonce = SecureRandom.hex(6)
+
+ Sidekiq.redis do |conn|
+ conn.set(Sidekiq::BaseReliableFetch.heartbeat_key("#{hostname}-#{pid}-#{nonce}"), 1)
+ end
+
+ "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{hostname}:#{pid}:#{nonce}"
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/reliable_fetch_spec.rb b/vendor/gems/sidekiq-reliable-fetch/spec/reliable_fetch_spec.rb
new file mode 100644
index 00000000000..bdef04a021f
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/reliable_fetch_spec.rb
@@ -0,0 +1,8 @@
+require 'spec_helper'
+require 'fetch_shared_examples'
+require 'sidekiq/base_reliable_fetch'
+require 'sidekiq/reliable_fetch'
+
+describe Sidekiq::ReliableFetch do
+ include_examples 'a Sidekiq fetcher'
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/semi_reliable_fetch_spec.rb b/vendor/gems/sidekiq-reliable-fetch/spec/semi_reliable_fetch_spec.rb
new file mode 100644
index 00000000000..84a0203b683
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/semi_reliable_fetch_spec.rb
@@ -0,0 +1,43 @@
+require 'spec_helper'
+require 'fetch_shared_examples'
+require 'sidekiq/base_reliable_fetch'
+require 'sidekiq/semi_reliable_fetch'
+
+describe Sidekiq::SemiReliableFetch do
+ include_examples 'a Sidekiq fetcher'
+
+ describe '#retrieve_work' do
+ context 'timeout config' do
+ let(:queues) { ['stuff_to_do'] }
+ let(:fetcher) { described_class.new(queues: queues) }
+
+ before do
+ stub_env('SIDEKIQ_SEMI_RELIABLE_FETCH_TIMEOUT', timeout)
+ end
+
+ context 'when the timeout is not configured' do
+ let(:timeout) { nil }
+
+ it 'brpops with the default timeout' do
+ Sidekiq.redis do |connection|
+ expect(connection).to receive(:brpop).with("queue:stuff_to_do", { timeout: 2 }).once.and_call_original
+
+ fetcher.retrieve_work
+ end
+ end
+ end
+
+ context 'when the timeout is set in the env' do
+ let(:timeout) { '5' }
+
+ it 'brpops with the timeout configured in the env' do
+ Sidekiq.redis do |connection|
+ expect(connection).to receive(:brpop).with("queue:stuff_to_do", { timeout: 5 }).once.and_call_original
+
+ fetcher.retrieve_work
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/spec_helper.rb b/vendor/gems/sidekiq-reliable-fetch/spec/spec_helper.rb
new file mode 100644
index 00000000000..45418571579
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/spec_helper.rb
@@ -0,0 +1,116 @@
+require 'sidekiq'
+require 'sidekiq/api'
+require 'pry'
+require 'simplecov'
+require 'stub_env'
+
+SimpleCov.start
+
+REDIS_URL = ENV['REDIS_URL'] || 'redis://localhost:6379/10'
+
+Sidekiq.configure_client do |config|
+ config.redis = { url: REDIS_URL }
+end
+
+Sidekiq.logger.level = Logger::ERROR
+# This file was generated by the `rspec --init` command. Conventionally, all
+# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`.
+# The generated `.rspec` file contains `--require spec_helper` which will cause
+# this file to always be loaded, without a need to explicitly require it in any
+# files.
+#
+# Given that it is always loaded, you are encouraged to keep this file as
+# light-weight as possible. Requiring heavyweight dependencies from this file
+# will add to the boot time of your test suite on EVERY test run, even for an
+# individual file that may not need all of that loaded. Instead, consider making
+# a separate helper file that requires the additional dependencies and performs
+# the additional setup, and require it from the spec files that actually need
+# it.
+#
+# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
+RSpec.configure do |config|
+ config.include StubEnv::Helpers
+ # rspec-expectations config goes here. You can use an alternate
+ # assertion/expectation library such as wrong or the stdlib/minitest
+ # assertions if you prefer.
+ config.expect_with :rspec do |expectations|
+ # This option will default to `true` in RSpec 4. It makes the `description`
+ # and `failure_message` of custom matchers include text for helper methods
+ # defined using `chain`, e.g.:
+ # be_bigger_than(2).and_smaller_than(4).description
+ # # => "be bigger than 2 and smaller than 4"
+ # ...rather than:
+ # # => "be bigger than 2"
+ expectations.include_chain_clauses_in_custom_matcher_descriptions = true
+ end
+
+ # rspec-mocks config goes here. You can use an alternate test double
+ # library (such as bogus or mocha) by changing the `mock_with` option here.
+ config.mock_with :rspec do |mocks|
+ # Prevents you from mocking or stubbing a method that does not exist on
+ # a real object. This is generally recommended, and will default to
+ # `true` in RSpec 4.
+ mocks.verify_partial_doubles = true
+ end
+
+ # This option will default to `:apply_to_host_groups` in RSpec 4 (and will
+ # have no way to turn it off -- the option exists only for backwards
+ # compatibility in RSpec 3). It causes shared context metadata to be
+ # inherited by the metadata hash of host groups and examples, rather than
+ # triggering implicit auto-inclusion in groups with matching metadata.
+ config.shared_context_metadata_behavior = :apply_to_host_groups
+
+# The settings below are suggested to provide a good initial experience
+# with RSpec, but feel free to customize to your heart's content.
+=begin
+ # This allows you to limit a spec run to individual examples or groups
+ # you care about by tagging them with `:focus` metadata. When nothing
+ # is tagged with `:focus`, all examples get run. RSpec also provides
+ # aliases for `it`, `describe`, and `context` that include `:focus`
+ # metadata: `fit`, `fdescribe` and `fcontext`, respectively.
+ config.filter_run_when_matching :focus
+
+ # Allows RSpec to persist some state between runs in order to support
+ # the `--only-failures` and `--next-failure` CLI options. We recommend
+ # you configure your source control system to ignore this file.
+ config.example_status_persistence_file_path = "spec/examples.txt"
+
+ # Limits the available syntax to the non-monkey patched syntax that is
+ # recommended. For more details, see:
+ # - http://rspec.info/blog/2012/06/rspecs-new-expectation-syntax/
+ # - http://www.teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/
+ # - http://rspec.info/blog/2014/05/notable-changes-in-rspec-3/#zero-monkey-patching-mode
+ config.disable_monkey_patching!
+
+ # This setting enables warnings. It's recommended, but in some cases may
+ # be too noisy due to issues in dependencies.
+ config.warnings = true
+
+ # Many RSpec users commonly either run the entire suite or an individual
+ # file, and it's useful to allow more verbose output when running an
+ # individual spec file.
+ if config.files_to_run.one?
+ # Use the documentation formatter for detailed output,
+ # unless a formatter has already been configured
+ # (e.g. via a command-line flag).
+ config.default_formatter = "doc"
+ end
+
+ # Print the 10 slowest examples and example groups at the
+ # end of the spec run, to help surface which specs are running
+ # particularly slow.
+ config.profile_examples = 10
+
+ # Run specs in random order to surface order dependencies. If you find an
+ # order dependency and want to debug it, you can fix the order by providing
+ # the seed, which is printed after each run.
+ # --seed 1234
+ config.order = :random
+
+ # Seed global randomization in this process using the `--seed` CLI option.
+ # Setting this allows you to use `--seed` to deterministically reproduce
+ # test failures related to randomization by passing the same `--seed` value
+ # as the one that triggered the failure.
+ Kernel.srand config.seed
+=end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/README.md b/vendor/gems/sidekiq-reliable-fetch/tests/README.md
new file mode 100644
index 00000000000..62ea6f48641
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/README.md
@@ -0,0 +1,37 @@
+# How to run reliability tests
+
+```
+cd tests/reliability
+bundle exec ruby reliability_test.rb
+```
+
+You can adjust some parameters of the test in `config.rb`.
+
+`JOB_FETCHER` can be set to one of the following values: `semi`, `reliable`, or `basic`.
+
+You need to have a Redis server running on the default port `6379`. To use another port, you can define the
+`REDIS_URL` environment variable with the port you need (example: `REDIS_URL="redis://localhost:9999"`).
+
+
+## How it works
+
+This tool spawns the configured number of Sidekiq workers, and when roughly half of the queued jobs have been
+processed it kills all the workers with `kill -9`, then spawns new workers again until all the jobs are processed. Progress and counters are tracked with Redis keys.
+
+# How to run interruption tests
+
+```
+cd tests/interruption
+
+# Verify "KILL" signal
+bundle exec ruby test_kill_signal.rb
+
+# Verify "TERM" signal
+bundle exec ruby test_term_signal.rb
+```
+
+It requires Redis to be running on port 6379.
+
+## How it works
+
+It spawns Sidekiq workers, then creates a job that kills its own worker process after a moment. The reliable fetcher brings the job back and it gets retried. The purpose is to verify that the job is run no more than the allowed number of times.
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/interruption/config.rb b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/config.rb
new file mode 100644
index 00000000000..f69cca96d80
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/config.rb
@@ -0,0 +1,19 @@
+# frozen_string_literal: true
+
+require_relative '../../lib/sidekiq-reliable-fetch'
+require_relative 'worker'
+
+TEST_CLEANUP_INTERVAL = 20
+TEST_LEASE_INTERVAL = 5
+
+Sidekiq.configure_server do |config|
+ config[:semi_reliable_fetch] = true
+
+ # We need to override these parameters to not wait too long
+ # The default values are good for production use only
+ # These will be ignored for :basic
+ config[:cleanup_interval] = TEST_CLEANUP_INTERVAL
+ config[:lease_interval] = TEST_LEASE_INTERVAL
+
+ Sidekiq::ReliableFetch.setup_reliable_fetch!(config)
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/interruption/test_kill_signal.rb b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/test_kill_signal.rb
new file mode 100755
index 00000000000..6f61f25970b
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/test_kill_signal.rb
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+require 'sidekiq'
+require_relative 'config'
+require_relative '../support/utils'
+
+EXPECTED_NUM_TIMES_BEEN_RUN = 3
+NUM_WORKERS = EXPECTED_NUM_TIMES_BEEN_RUN + 1
+
+Sidekiq.redis(&:flushdb)
+
+pids = spawn_workers(NUM_WORKERS)
+
+RetryTestWorker.perform_async
+
+sleep 300
+
+Sidekiq.redis do |redis|
+ times_has_been_run = redis.get('times_has_been_run').to_i
+ assert 'The job has been run', times_has_been_run, EXPECTED_NUM_TIMES_BEEN_RUN
+end
+
+assert 'Found interruption exhausted jobs', Sidekiq::InterruptedSet.new.size, 1
+
+stop_workers(pids)
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/interruption/test_term_signal.rb b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/test_term_signal.rb
new file mode 100755
index 00000000000..218cd4cbc9d
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/test_term_signal.rb
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+require 'sidekiq'
+require_relative 'config'
+require_relative '../support/utils'
+
+EXPECTED_NUM_TIMES_BEEN_RUN = 3
+NUM_WORKERS = EXPECTED_NUM_TIMES_BEEN_RUN + 1
+
+Sidekiq.redis(&:flushdb)
+
+pids = spawn_workers(NUM_WORKERS)
+
+RetryTestWorker.perform_async('TERM', 60)
+
+sleep 300
+
+Sidekiq.redis do |redis|
+ times_has_been_run = redis.get('times_has_been_run').to_i
+ assert 'The job has been run', times_has_been_run, EXPECTED_NUM_TIMES_BEEN_RUN
+end
+
+assert 'Found interruption exhausted jobs', Sidekiq::InterruptedSet.new.size, 1
+
+stop_workers(pids)
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/interruption/worker.rb b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/worker.rb
new file mode 100644
index 00000000000..0e19bc635f7
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/worker.rb
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+class RetryTestWorker
+ include Sidekiq::Worker
+
+ def perform(signal = 'KILL', wait_seconds = 1)
+ Sidekiq.redis do |redis|
+ redis.incr('times_has_been_run')
+ end
+
+ Process.kill(signal, Process.pid)
+
+ sleep wait_seconds
+ end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/reliability/config.rb b/vendor/gems/sidekiq-reliable-fetch/tests/reliability/config.rb
new file mode 100644
index 00000000000..05ffcfca9b5
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/reliability/config.rb
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+require_relative '../../lib/sidekiq-reliable-fetch'
+require_relative 'worker'
+
+REDIS_FINISHED_LIST = 'reliable-fetcher-finished-jids'
+
+NUMBER_OF_WORKERS = ENV['NUMBER_OF_WORKERS'] || 10
+NUMBER_OF_JOBS = ENV['NUMBER_OF_JOBS'] || 1000
+JOB_FETCHER = (ENV['JOB_FETCHER'] || :semi).to_sym # :basic, :semi, :reliable
+TEST_CLEANUP_INTERVAL = 20
+TEST_LEASE_INTERVAL = 5
+WAIT_CLEANUP = TEST_CLEANUP_INTERVAL +
+ TEST_LEASE_INTERVAL +
+ Sidekiq::ReliableFetch::HEARTBEAT_LIFESPAN
+
+Sidekiq.configure_server do |config|
+ if %i[semi reliable].include?(JOB_FETCHER)
+ config[:semi_reliable_fetch] = (JOB_FETCHER == :semi)
+
+ # We need to override these parameters to not wait too long
+ # The default values are good for production use only
+ # These will be ignored for :basic
+ config[:cleanup_interval] = TEST_CLEANUP_INTERVAL
+ config[:lease_interval] = TEST_LEASE_INTERVAL
+
+ Sidekiq::ReliableFetch.setup_reliable_fetch!(config)
+ end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/reliability/reliability_test.rb b/vendor/gems/sidekiq-reliable-fetch/tests/reliability/reliability_test.rb
new file mode 100755
index 00000000000..6324971fe8f
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/reliability/reliability_test.rb
@@ -0,0 +1,115 @@
+# frozen_string_literal: true
+
+require 'sidekiq'
+require 'sidekiq/cli'
+require_relative 'config'
+
+def spawn_workers_and_stop_them_on_a_half_way
+ pids = spawn_workers
+
+ wait_until do |queue_size|
+ queue_size < NUMBER_OF_JOBS / 2
+ end
+
+ first_half_pids, second_half_pids = split_array(pids)
+
+ puts 'Killing half of the workers...'
+ signal_to_workers('KILL', first_half_pids)
+
+ puts 'Stopping another half of the workers...'
+ signal_to_workers('TERM', second_half_pids)
+end
+
+def spawn_workers_and_let_them_finish
+ puts 'Spawn workers and let them finish...'
+
+ pids = spawn_workers
+
+ wait_until do |queue_size|
+ queue_size.zero?
+ end
+
+ if %i[semi reliable].include? JOB_FETCHER
+ puts 'Waiting for clean up process that will requeue dead jobs...'
+ sleep WAIT_CLEANUP
+ end
+
+ signal_to_workers('TERM', pids)
+end
+
+def wait_until
+ loop do
+ sleep 3
+
+ queue_size = current_queue_size
+ puts "Jobs in the queue:#{queue_size}"
+
+ break if yield(queue_size)
+ end
+end
+
+def signal_to_workers(signal, pids)
+ pids.each { |pid| Process.kill(signal, pid) }
+ pids.each { |pid| Process.wait(pid) }
+end
+
+def spawn_workers
+ pids = []
+ NUMBER_OF_WORKERS.times do
+ pids << spawn('sidekiq -q default -q low -q high -r ./config.rb')
+ end
+
+ pids
+end
+
+def current_queue_size
+ Sidekiq.redis { |c| c.llen('queue:default') }
+end
+
+def duplicates
+ Sidekiq.redis { |c| c.llen(REDIS_FINISHED_LIST) }
+end
+
+# Splits array into two halves
+def split_array(arr)
+ first_arr = arr.take(arr.size / 2)
+ second_arr = arr - first_arr
+ [first_arr, second_arr]
+end
+
+##########################################################
+
+puts '########################################'
+puts "Mode: #{JOB_FETCHER}"
+puts '########################################'
+
+Sidekiq.redis(&:flushdb)
+
+jobs = []
+
+NUMBER_OF_JOBS.times do
+ jobs << ReliabilityTestWorker.perform_async
+end
+
+puts "Queued #{NUMBER_OF_JOBS} jobs"
+
+spawn_workers_and_stop_them_on_a_half_way
+spawn_workers_and_let_them_finish
+
+jobs_lost = 0
+
+Sidekiq.redis do |redis|
+ jobs.each do |job|
+ next if redis.lrem(REDIS_FINISHED_LIST, 1, job) == 1
+ jobs_lost += 1
+ end
+end
+
+puts "Remaining unprocessed: #{jobs_lost}"
+puts "Duplicates found: #{duplicates}"
+
+if jobs_lost.zero? && duplicates.zero?
+ exit 0
+else
+ exit 1
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/reliability/worker.rb b/vendor/gems/sidekiq-reliable-fetch/tests/reliability/worker.rb
new file mode 100644
index 00000000000..8b580a67ec1
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/reliability/worker.rb
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+class ReliabilityTestWorker
+ include Sidekiq::Worker
+
+ def perform
+ # To mimic a long-running job and to increase the probability of losing it
+ sleep 1
+
+ Sidekiq.redis do |redis|
+ redis.lpush(REDIS_FINISHED_LIST, jid)
+ end
+ end
+end
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/support/utils.rb b/vendor/gems/sidekiq-reliable-fetch/tests/support/utils.rb
new file mode 100644
index 00000000000..c481c6dad80
--- /dev/null
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/support/utils.rb
@@ -0,0 +1,26 @@
+def assert(text, actual, expected)
+ if actual == expected
+ puts "#{text}: #{actual} (Success)"
+ else
+ puts "#{text}: #{actual} (Failed). Expected: #{expected}"
+ exit 1
+ end
+end
+
+def spawn_workers(number)
+ pids = []
+
+ number.times do
+ pids << spawn('sidekiq -q default -q high -q low -r ./config.rb')
+ end
+
+ pids
+end
+
+# Stop Sidekiq workers
+def stop_workers(pids)
+ pids.each do |pid|
+ Process.kill('KILL', pid)
+ Process.wait pid
+ end
+end