summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.zuul.yaml4
-rw-r--r--doc/source/client.rst252
-rw-r--r--doc/source/concepts.rst8
-rw-r--r--doc/source/config/job.rst118
-rw-r--r--doc/source/config/project.rst4
-rw-r--r--doc/source/config/semaphore.rst4
-rw-r--r--doc/source/developer/model-changelog.rst7
-rw-r--r--doc/source/developer/zookeeper.rst7
-rw-r--r--doc/source/drivers/gerrit.rst6
-rw-r--r--doc/source/drivers/github.rst31
-rw-r--r--doc/source/gating.rst22
-rw-r--r--doc/source/howtos/openid-with-keycloak.rst6
-rw-r--r--doc/source/tenants.rst111
-rw-r--r--playbooks/tutorial/quick-start.yaml2
-rw-r--r--playbooks/zuul-stream/templates/ansible.cfg.j22
-rw-r--r--releasenotes/notes/add-buildset-statusurl.yaml5
-rw-r--r--releasenotes/notes/always-dynamic-dce165ca8b6e212f.yaml10
-rw-r--r--releasenotes/notes/ansible-5-0c9d6626294579e0.yaml5
-rw-r--r--releasenotes/notes/cherry-pick-merges-9c78fd914b682671.yaml7
-rw-r--r--releasenotes/notes/deduplicate-ac171d3206eb43b3.yaml6
-rw-r--r--releasenotes/notes/dependency-validation-000f63204da83b4a.yaml12
-rw-r--r--releasenotes/notes/gearmanectomy-ecec32b3764eafc1.yaml2
-rw-r--r--releasenotes/notes/gerrit-ssh-server-a183363bac9d7ff8.yaml6
-rw-r--r--releasenotes/notes/global-semaphores-719828567158982a.yaml5
-rw-r--r--releasenotes/notes/include-branches-493b6bb04414341b.yaml9
-rw-r--r--releasenotes/notes/prune-database-eca38464b9606254.yaml21
-rw-r--r--releasenotes/notes/repo_cache-5524b183a8bc3e6a.yaml8
-rw-r--r--releasenotes/notes/wait-for-init-934370422b22b442.yaml8
-rw-r--r--releasenotes/notes/zuul-admin-cli-53f52cff1aad986c.yaml7
-rw-r--r--setup.cfg3
-rw-r--r--tests/base.py191
-rw-r--r--tests/fakegitlab.py5
-rw-r--r--tests/fixtures/config/circular-dependencies/git/common-config/zuul.yaml1
-rw-r--r--tests/fixtures/config/dynamic-only-project/dynamic.yaml10
-rw-r--r--tests/fixtures/config/dynamic-only-project/exclude.yaml10
-rw-r--r--tests/fixtures/config/dynamic-only-project/git/common-config/playbooks/run.yaml1
-rw-r--r--tests/fixtures/config/dynamic-only-project/git/common-config/zuul.yaml67
-rw-r--r--tests/fixtures/config/dynamic-only-project/git/org_project/README1
-rw-r--r--tests/fixtures/config/dynamic-only-project/git/org_project/zuul.yaml17
-rw-r--r--tests/fixtures/config/dynamic-only-project/include.yaml11
-rw-r--r--tests/fixtures/config/global-semaphores-config/broken.yaml10
-rw-r--r--tests/fixtures/config/global-semaphores-config/git/common-config/playbooks/run.yaml1
-rw-r--r--tests/fixtures/config/global-semaphores-config/git/common-config/zuul.yaml52
-rw-r--r--tests/fixtures/config/global-semaphores-config/git/org_project1/README1
-rw-r--r--tests/fixtures/config/global-semaphores-config/git/org_project1/zuul.yaml19
-rw-r--r--tests/fixtures/config/global-semaphores-config/git/org_project2/README1
-rw-r--r--tests/fixtures/config/global-semaphores-config/git/org_project2/zuul.yaml14
-rw-r--r--tests/fixtures/config/global-semaphores-config/git/org_project3/README1
-rw-r--r--tests/fixtures/config/global-semaphores-config/git/org_project3/zuul.yaml20
-rw-r--r--tests/fixtures/config/global-semaphores-config/main.yaml34
-rw-r--r--tests/fixtures/config/global-semaphores/git/common-config/playbooks/run.yaml1
-rw-r--r--tests/fixtures/config/global-semaphores/git/common-config/zuul.yaml44
-rw-r--r--tests/fixtures/config/global-semaphores/git/org_project1/README1
-rw-r--r--tests/fixtures/config/global-semaphores/git/org_project1/zuul.yaml4
-rw-r--r--tests/fixtures/config/global-semaphores/git/org_project2/README1
-rw-r--r--tests/fixtures/config/global-semaphores/git/org_project2/zuul.yaml4
-rw-r--r--tests/fixtures/config/global-semaphores/main.yaml25
-rw-r--r--tests/fixtures/config/remote-zuul-stream/git/org_project/playbooks/command.yaml25
-rw-r--r--tests/fixtures/config/remote-zuul-stream/git/org_project/roles/echo-role/tasks/main.yaml2
-rw-r--r--tests/fixtures/config/remote-zuul-stream/git/org_project/roles/include-echo-role/tasks/main.yaml3
-rw-r--r--tests/fixtures/config/tenant-parser/exclude-branches.yaml12
-rw-r--r--tests/fixtures/config/tenant-parser/include-branches.yaml12
-rw-r--r--tests/fixtures/layouts/circular-deps-node-failure.yaml62
-rw-r--r--tests/fixtures/layouts/job-dedup-auto-shared.yaml65
-rw-r--r--tests/fixtures/layouts/job-dedup-auto-unshared.yaml62
-rw-r--r--tests/fixtures/layouts/job-dedup-auto.yaml61
-rw-r--r--tests/fixtures/layouts/job-dedup-empty-nodeset.yaml61
-rw-r--r--tests/fixtures/layouts/job-dedup-false.yaml66
-rw-r--r--tests/fixtures/layouts/job-dedup-parent-data.yaml81
-rw-r--r--tests/fixtures/layouts/job-dedup-retry-child.yaml65
-rw-r--r--tests/fixtures/layouts/job-dedup-retry.yaml66
-rw-r--r--tests/fixtures/layouts/job-dedup-semaphore-first.yaml71
-rw-r--r--tests/fixtures/layouts/job-dedup-semaphore.yaml70
-rw-r--r--tests/fixtures/layouts/job-dedup-true.yaml63
-rw-r--r--tests/fixtures/layouts/merging-gitlab-squash-merge.yaml28
-rw-r--r--tests/fixtures/layouts/sos-circular.yaml1
-rw-r--r--tests/fixtures/layouts/two-projects-integrated.yaml16
-rw-r--r--tests/fixtures/zuul-gerrit-ssh.conf38
-rw-r--r--tests/remote/test_remote_zuul_stream.py26
-rw-r--r--tests/unit/test_circular_dependencies.py438
-rw-r--r--tests/unit/test_client.py155
-rw-r--r--tests/unit/test_configloader.py77
-rw-r--r--tests/unit/test_connection.py2
-rw-r--r--tests/unit/test_database.py44
-rw-r--r--tests/unit/test_gerrit.py100
-rw-r--r--tests/unit/test_github_driver.py108
-rw-r--r--tests/unit/test_gitlab_driver.py16
-rw-r--r--tests/unit/test_global_semaphores.py169
-rw-r--r--tests/unit/test_model_upgrade.py42
-rw-r--r--tests/unit/test_scheduler.py76
-rw-r--r--tests/unit/test_v3.py153
-rw-r--r--tests/unit/test_web.py129
-rw-r--r--tools/deprecated-queues.py61
-rwxr-xr-xtools/test-logs.sh10
-rw-r--r--web/src/containers/logfile/LogFile.jsx5
-rw-r--r--web/src/reducers/initialState.js2
-rwxr-xr-xzuul/ansible/2.8/library/command.py21
-rwxr-xr-xzuul/ansible/2.9/library/command.py21
-rw-r--r--zuul/ansible/base/action/command.py14
-rw-r--r--zuul/ansible/base/callback/zuul_stream.py16
-rwxr-xr-xzuul/ansible/base/library/command.py21
-rw-r--r--zuul/ansible/paths.py17
-rwxr-xr-xzuul/cmd/client.py110
-rwxr-xr-xzuul/cmd/executor.py17
-rw-r--r--zuul/cmd/fingergw.py32
-rwxr-xr-xzuul/cmd/merger.py20
-rwxr-xr-xzuul/cmd/scheduler.py23
-rw-r--r--zuul/configloader.py287
-rw-r--r--zuul/connection/__init__.py32
-rw-r--r--zuul/driver/elasticsearch/reporter.py4
-rw-r--r--zuul/driver/gerrit/gerritconnection.py178
-rw-r--r--zuul/driver/gerrit/gerritreporter.py5
-rw-r--r--zuul/driver/github/githubconnection.py25
-rw-r--r--zuul/driver/github/githubreporter.py38
-rw-r--r--zuul/driver/github/githubsource.py4
-rw-r--r--zuul/driver/gitlab/gitlabconnection.py3
-rw-r--r--zuul/driver/gitlab/gitlabreporter.py17
-rw-r--r--zuul/driver/mqtt/mqttreporter.py4
-rw-r--r--zuul/driver/pagure/pagurereporter.py25
-rw-r--r--zuul/driver/smtp/smtpreporter.py4
-rw-r--r--zuul/driver/sql/alembic/versions/c7467b642498_buildset_updated.py47
-rw-r--r--zuul/driver/sql/sqlconnection.py17
-rw-r--r--zuul/driver/sql/sqlreporter.py2
-rw-r--r--zuul/lib/ansible-config.conf3
-rw-r--r--zuul/lib/encryption.py16
-rw-r--r--zuul/lib/fingergw.py4
-rw-r--r--zuul/manager/__init__.py117
-rw-r--r--zuul/merger/merger.py24
-rw-r--r--zuul/model.py269
-rw-r--r--zuul/model_api.py2
-rw-r--r--zuul/reporter/__init__.py23
-rw-r--r--zuul/scheduler.py45
-rw-r--r--zuul/source/__init__.py10
-rwxr-xr-xzuul/web/__init__.py155
-rw-r--r--zuul/zk/__init__.py9
-rw-r--r--zuul/zk/branch_cache.py11
-rw-r--r--zuul/zk/event_queues.py12
-rw-r--r--zuul/zk/job_request_queue.py8
-rw-r--r--zuul/zk/semaphore.py71
139 files changed, 4843 insertions, 728 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 00d0a7053..3efcff86b 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -102,7 +102,7 @@
- job:
name: zuul-tox-py310
parent: zuul-tox
- timeout: 5400 # 90 minutes
+ timeout: 7200 # 120 minutes
vars:
tox_envlist: py310
python_version: "3.10"
@@ -110,7 +110,7 @@
- job:
name: zuul-tox-py38
parent: zuul-tox
- timeout: 5400 # 90 minutes
+ timeout: 7200 # 120 minutes
vars:
tox_envlist: py38
python_version: "3.8"
diff --git a/doc/source/client.rst b/doc/source/client.rst
index 0177c7347..cde18bfff 100644
--- a/doc/source/client.rst
+++ b/doc/source/client.rst
@@ -1,11 +1,12 @@
-:title: Zuul Client
+:title: Zuul Admin Client
-Zuul Client
-===========
+Zuul Admin Client
+=================
Zuul includes a simple command line client that may be used to affect Zuul's
-behavior while running. It must be run on a host with access to Zuul's web
-server.
+behavior while running.
+
+.. note:: For operations related to normal workflow like enqueue, dequeue, autohold and promote, the `zuul-client` CLI should be used instead.
Configuration
-------------
@@ -13,76 +14,177 @@ Configuration
The client uses the same zuul.conf file as the server, and will look
for it in the same locations if not specified on the command line.
-The ``webclient`` section is required.
-
-It is also possible to run the client without a configuration file, by using the
-``--zuul-url`` option to specify the base URL of the Zuul web server.
-
-.. note:: Not all commands are available through the REST API.
-
Usage
-----
The general options that apply to all subcommands are:
-.. program-output:: zuul --help
+.. program-output:: zuul-admin --help
The following subcommands are supported:
+tenant-conf-check
+^^^^^^^^^^^^^^^^^
+
+.. program-output:: zuul-admin tenant-conf-check --help
+
+Example::
+
+ zuul-admin tenant-conf-check
+
+This command validates the tenant configuration schema. It exits '-1' in
+case of errors detected.
+
+create-auth-token
+^^^^^^^^^^^^^^^^^
+
+.. note:: This command is only available if an authenticator is configured in
+ ``zuul.conf``. Furthermore the authenticator's configuration must
+ include a signing secret.
+
+.. program-output:: zuul-admin create-auth-token --help
+
+Example::
+
+ zuul-admin create-auth-token --auth-config zuul-operator --user alice --tenant tenantA --expires-in 1800
+
+The return value is the value of the ``Authorization`` header the user must set
+when querying a protected endpoint on Zuul's REST API.
+
+Example::
+
+ bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJodHRwOi8vbWFuYWdlc2Yuc2ZyZG90ZXN0aW5zdGFuY2Uub3JnIiwienV1bC50ZW5hbnRzIjp7ImxvY2FsIjoiKiJ9LCJleHAiOjE1Mzc0MTcxOTguMzc3NTQ0fQ.DLbKx1J84wV4Vm7sv3zw9Bw9-WuIka7WkPQxGDAHz7s
+
+export-keys
+^^^^^^^^^^^
+
+.. program-output:: zuul-admin export-keys --help
+
+Example::
+
+ zuul-admin export-keys /var/backup/zuul-keys.json
+
+import-keys
+^^^^^^^^^^^
+
+.. program-output:: zuul-admin import-keys --help
+
+Example::
+
+ zuul-admin import-keys /var/backup/zuul-keys.json
+
+copy-keys
+^^^^^^^^^
+
+.. program-output:: zuul-admin copy-keys --help
+
+Example::
+
+ zuul-admin copy-keys gerrit old_project gerrit new_project
+
+delete-keys
+^^^^^^^^^^^
+
+.. program-output:: zuul-admin delete-keys --help
+
+Example::
+
+ zuul-admin delete-keys gerrit old_project
+
+delete-state
+^^^^^^^^^^^^
+
+.. program-output:: zuul-admin delete-state --help
+
+Example::
+
+ zuul-admin delete-state
+
+delete-pipeline-state
+^^^^^^^^^^^^^^^^^^^^^
+
+.. program-output:: zuul-admin delete-pipeline-state --help
+
+Example::
+
+ zuul-admin delete-pipeline-state tenant pipeline
+
+prune-database
+^^^^^^^^^^^^^^
+
+.. program-output:: zuul-admin prune-database --help
+
+Example::
+
+ zuul-admin prune-database --older-than 180d
+
+Deprecated commands
+-------------------
+
+The following commands are deprecated in the zuul-admin CLI, and thus may not be entirely supported in Zuul's current version.
+They will be removed in a future release of Zuul. They can still be performed via the `zuul-client` CLI.
+Please refer to `zuul-client's documentation <https://zuul-ci.org/docs/zuul-client/>`__
+for more details.
+
+In order to run these commands, the ``webclient`` section is required in the configuration file.
+
+It is also possible to run the client without a configuration file, by using the
+``--zuul-url`` option to specify the base URL of the Zuul web server.
+
Autohold
^^^^^^^^
-.. program-output:: zuul autohold --help
+.. program-output:: zuul-admin autohold --help
Example::
- zuul autohold --tenant openstack --project example_project --job example_job --reason "reason text" --count 1
+ zuul-admin autohold --tenant openstack --project example_project --job example_job --reason "reason text" --count 1
Autohold Delete
^^^^^^^^^^^^^^^
-.. program-output:: zuul autohold-delete --help
+.. program-output:: zuul-admin autohold-delete --help
Example::
- zuul autohold-delete --id 0000000123
+ zuul-admin autohold-delete --id 0000000123
Autohold Info
^^^^^^^^^^^^^
-.. program-output:: zuul autohold-info --help
+.. program-output:: zuul-admin autohold-info --help
Example::
- zuul autohold-info --id 0000000123
+ zuul-admin autohold-info --id 0000000123
Autohold List
^^^^^^^^^^^^^
-.. program-output:: zuul autohold-list --help
+.. program-output:: zuul-admin autohold-list --help
Example::
- zuul autohold-list --tenant openstack
+ zuul-admin autohold-list --tenant openstack
Dequeue
^^^^^^^
-.. program-output:: zuul dequeue --help
+.. program-output:: zuul-admin dequeue --help
Examples::
- zuul dequeue --tenant openstack --pipeline check --project example_project --change 5,1
- zuul dequeue --tenant openstack --pipeline periodic --project example_project --ref refs/heads/master
+ zuul-admin dequeue --tenant openstack --pipeline check --project example_project --change 5,1
+ zuul-admin dequeue --tenant openstack --pipeline periodic --project example_project --ref refs/heads/master
Enqueue
^^^^^^^
-.. program-output:: zuul enqueue --help
+.. program-output:: zuul-admin enqueue --help
Example::
- zuul enqueue --tenant openstack --trigger gerrit --pipeline check --project example_project --change 12345,1
+ zuul-admin enqueue --tenant openstack --trigger gerrit --pipeline check --project example_project --change 12345,1
Note that the format of change id is <number>,<patchset>.
Enqueue-ref
^^^^^^^^^^^
-.. program-output:: zuul enqueue-ref --help
+.. program-output:: zuul-admin enqueue-ref --help
This command is provided to manually simulate a trigger from an
external source. It can be useful for testing or replaying a trigger
@@ -106,7 +208,7 @@ the jobs, pass the failed tag as the ``ref`` argument and set
``newrev`` to the change associated with the tag in the project
repository (i.e. what you see from ``git show X.Y.Z``)::
- zuul enqueue-ref --tenant openstack --trigger gerrit --pipeline release --project openstack/example_project --ref refs/tags/X.Y.Z --newrev abc123..
+ zuul-admin enqueue-ref --tenant openstack --trigger gerrit --pipeline release --project openstack/example_project --ref refs/tags/X.Y.Z --newrev abc123..
The command can also be used to asynchronously trigger a job in a
``periodic`` pipeline that would usually be run at a specific time by
@@ -114,7 +216,7 @@ the ``timer`` driver. For example, the following command would
trigger the ``periodic`` jobs against the current ``master`` branch
top-of-tree for a project::
- zuul enqueue-ref --tenant openstack --trigger timer --pipeline periodic --project openstack/example_project --ref refs/heads/master
+ zuul-admin enqueue-ref --tenant openstack --trigger timer --pipeline periodic --project openstack/example_project --ref refs/heads/master
Another common pipeline is a ``post`` queue listening for ``gerrit``
merge results. Triggering here is slightly more complicated as you
@@ -128,7 +230,7 @@ current ``HEAD`` and the prior change, then enqueue the event::
NEW_REF=$(git rev-parse HEAD)
OLD_REF=$(git rev-parse HEAD~1)
- zuul enqueue-ref --tenant openstack --trigger gerrit --pipeline post --project openstack/example_project --ref refs/heads/master --newrev $NEW_REF --oldrev $OLD_REF
+ zuul-admin enqueue-ref --tenant openstack --trigger gerrit --pipeline post --project openstack/example_project --ref refs/heads/master --newrev $NEW_REF --oldrev $OLD_REF
Note that zero values for ``oldrev`` and ``newrev`` can indicate
branch creation and deletion; the source code is the best reference
@@ -138,11 +240,11 @@ for these more advanced operations.
Promote
^^^^^^^
-.. program-output:: zuul promote --help
+.. program-output:: zuul-admin promote --help
Example::
- zuul promote --tenant openstack --pipeline gate --changes 12345,1 13336,3
+ zuul-admin promote --tenant openstack --pipeline gate --changes 12345,1 13336,3
Note that the format of changes id is <number>,<patchset>.
@@ -164,89 +266,3 @@ If items in independent pipelines are promoted, no jobs will be
restarted, but their change queues within the pipeline will be
re-ordered so that they will be processed first and their node request
priorities will increase.
-
-tenant-conf-check
-^^^^^^^^^^^^^^^^^
-
-.. program-output:: zuul tenant-conf-check --help
-
-Example::
-
- zuul tenant-conf-check
-
-This command validates the tenant configuration schema. It exits '-1' in
-case of errors detected.
-
-create-auth-token
-^^^^^^^^^^^^^^^^^
-
-.. note:: This command is only available if an authenticator is configured in
- ``zuul.conf``. Furthermore the authenticator's configuration must
- include a signing secret.
-
-.. program-output:: zuul create-auth-token --help
-
-Example::
-
- zuul create-auth-token --auth-config zuul-operator --user alice --tenant tenantA --expires-in 1800
-
-The return value is the value of the ``Authorization`` header the user must set
-when querying a protected endpoint on Zuul's REST API.
-
-Example::
-
- bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJodHRwOi8vbWFuYWdlc2Yuc2ZyZG90ZXN0aW5zdGFuY2Uub3JnIiwienV1bC50ZW5hbnRzIjp7ImxvY2FsIjoiKiJ9LCJleHAiOjE1Mzc0MTcxOTguMzc3NTQ0fQ.DLbKx1J84wV4Vm7sv3zw9Bw9-WuIka7WkPQxGDAHz7s
-
-export-keys
-^^^^^^^^^^^
-
-.. program-output:: zuul export-keys --help
-
-Example::
-
- zuul export-keys /var/backup/zuul-keys.json
-
-import-keys
-^^^^^^^^^^^
-
-.. program-output:: zuul import-keys --help
-
-Example::
-
- zuul import-keys /var/backup/zuul-keys.json
-
-copy-keys
-^^^^^^^^^
-
-.. program-output:: zuul copy-keys --help
-
-Example::
-
- zuul copy-keys gerrit old_project gerrit new_project
-
-delete-keys
-^^^^^^^^^^^
-
-.. program-output:: zuul delete-keys --help
-
-Example::
-
- zuul delete-keys gerrit old_project
-
-delete-state
-^^^^^^^^^^^^
-
-.. program-output:: zuul delete-state --help
-
-Example::
-
- zuul delete-state
-
-delete-pipeline-state
-^^^^^^^^^^^^^^^^^^^^^
-
-.. program-output:: zuul delete-pipeline-state --help
-
-Example::
-
- zuul delete-pipeline-state tenant pipeline
diff --git a/doc/source/concepts.rst b/doc/source/concepts.rst
index 7bf47e581..92384dbf6 100644
--- a/doc/source/concepts.rst
+++ b/doc/source/concepts.rst
@@ -53,8 +53,12 @@ substantially different than the git repo state of the change itself
since the repo may have merged other changes since the change was
originally authored). Items in a pipeline may depend on other items,
and if they do, all of their dependent changes will be included in the
-git repo state that Zuul prepares. For more detail on this process,
-see :ref:`project_gating` and :ref:`dependencies`.
+git repo state that Zuul prepares. Jobs may also specify that they
+require additional git repos, and if so, the repo state (as of the
+time when the item was enqueued in the pipeline) for those repos will
+also be included. For more detail on this process, see
+:ref:`project_gating`, :ref:`dependencies`, and
+:ref:`global_repo_state`.
The configuration for nearly everything described above is held in
files inside of the git repos upon which Zuul operates. Zuul's
diff --git a/doc/source/config/job.rst b/doc/source/config/job.rst
index da30bc56f..87806727f 100644
--- a/doc/source/config/job.rst
+++ b/doc/source/config/job.rst
@@ -18,7 +18,10 @@ starting with very basic jobs which describe characteristics that all
jobs on the system should have, progressing through stages of
specialization before arriving at a particular job. A job may inherit
from any other job in any project (however, if the other job is marked
-as :attr:`job.final`, jobs may not inherit from it).
+as :attr:`job.final`, jobs may not inherit from it). Generally,
+attributes on child jobs will override (or completely replace)
+attributes on the parent, however some attributes are combined. See
+the documentation for individual attributes for these exceptions.
A job with no parent is called a *base job* and may only be defined in
a :term:`config-project`. Every other job must have a parent, and so
@@ -58,6 +61,7 @@ be:
* child playbook
* child post-run playbook
* parent post-run playbook
+* parent cleanup-run playbook
Further inheritance would nest even deeper.
@@ -146,6 +150,11 @@ Here is an example of two job definitions:
To indicate a job is not intended to be run directly, but
instead must be inherited from, set this attribute to ``true``.
+ Once this is set to ``true`` in a job it cannot be reset to
+ ``false`` within the same job by other variants; however jobs
+ which inherit from it can (and by default do) reset it to
+ ``false``.
+
.. warning::
It is possible to circumvent the use of `abstract` in an
@@ -162,6 +171,11 @@ Here is an example of two job definitions:
*must* also be ``abstract``; a configuration error will be
raised if not.
+ Once this is set to ``true`` in a job it cannot be reset to
+ ``false`` within the same job by other variants; however jobs
+ which inherit from it can (and by default do) reset it to
+ ``false``.
+
For example, you may define a base abstract job `foo` and create
two abstract jobs that inherit from `foo` called
`foo-production` and `foo-development`. If it would be an error
@@ -205,14 +219,14 @@ Here is an example of two job definitions:
.. attr:: semaphores
- The name of a :ref:`semaphore` (or list of them) which should be
- acquired and released when the job begins and ends. If the
- semaphore is at maximum capacity, then Zuul will wait until it
- can be acquired before starting the job. The format is either a
- string, a dictionary, or a list of either of those in the case
- of multiple semaphores. If it's a string it references a
- semaphore using the default value for
- :attr:`job.semaphores.resources-first`.
+ The name of a :ref:`semaphore` (or list of them) or
+ :ref:`global_semaphore` which should be acquired and released
+ when the job begins and ends. If the semaphore is at maximum
+ capacity, then Zuul will wait until it can be acquired before
+ starting the job. The format is either a string, a dictionary,
+ or a list of either of those in the case of multiple
+ semaphores. If it's a string it references a semaphore using the
+ default value for :attr:`job.semaphores.resources-first`.
If multiple semaphores are requested, the job will not start
until all have been acquired, and Zuul will wait until all are
@@ -258,6 +272,10 @@ Here is an example of two job definitions:
by this job which may be used by other jobs for other changes
using the :attr:`job.requires` attribute.
+ When inheriting jobs or applying variants, the list of
+ `provides` is extended (`provides` specified in a job definition
+ are added to any supplied by their parents).
+
.. attr:: requires
A list of free-form strings which identify resources which may
@@ -277,6 +295,10 @@ Here is an example of two job definitions:
returned by the `provides` jobs will be made available to the
`requires` job.
+ When inheriting jobs or applying variants, the list of
+ `requires` is extended (`requires` specified in a job definition
+ are added to any supplied by their parents).
+
For example, a job which produces a builder container image in
one project that is then consumed by a container image build job
in another project might look like this:
@@ -488,6 +510,10 @@ Here is an example of two job definitions:
even when the job is canceled. Cleanup results are not taken into
account.
+ When a job inherits from a parent, the child's cleanup-run
+ playbooks are run before the parent's. See :ref:`job` for more
+ information.
+
.. attr:: run
The name of a playbook or list of playbooks for this job. If it
@@ -539,15 +565,18 @@ Here is an example of two job definitions:
appear on the job -- roles earlier in the list will take
precedence over those which follow.
- In the case of job inheritance or variance, the roles used for
- each of the playbooks run by the job will be only those which
- were defined along with that playbook. If a child job inherits
- from a parent which defines a pre and post playbook, then the
- pre and post playbooks it inherits from the parent job will run
- only with the roles that were defined on the parent. If the
- child adds its own pre and post playbooks, then any roles added
- by the child will be available to the child's playbooks. This
- is so that a job which inherits from a parent does not
+ This attribute is not overridden on inheritance or variance;
+ instead roles are added with each new job or variant. In the
+ case of job inheritance or variance, the roles used for each of
+ the playbooks run by the job will be only those which were
+ cumulatively defined up to that point in the inheritance
+ hierarchy where that playbook was added. If a child job
+ inherits from a parent which defines a pre and post playbook,
+ then the pre and post playbooks it inherits from the parent job
+ will run only with the roles that were defined on the parent.
+ If the child adds its own pre and post playbooks, then any roles
+ added by the child will be available to the child's playbooks.
+ This is so that a job which inherits from a parent does not
inadvertently alter the behavior of the parent's playbooks by
the addition of conflicting roles. Roles added by a child will
appear before those it inherits from its parent.
@@ -625,9 +654,15 @@ Here is an example of two job definitions:
A list of other projects which are used by this job. Any Zuul
projects specified here will also be checked out by Zuul into
the working directory for the job. Speculative merging and
- cross-repo dependencies will be honored.
+ cross-repo dependencies will be honored. If there is not a
+ change for the project ahead in the pipeline, its repo state as
+ of the time the item was enqueued will be frozen and used for
+ all jobs for a given change (see :ref:`global_repo_state`).
- This attribute is a union of all applicable parents and variants.
+ This attribute is not overridden by inheritance; instead it is
+ the union of all applicable parents and variants (i.e., jobs can
+ expand but not reduce the set of required projects when they
+ inherit).
The format for this attribute is either a list of strings or
dictionaries. Strings are interpreted as project names,
@@ -777,6 +812,11 @@ Here is an example of two job definitions:
any project's pipeline. Apply caution when doing so as other
projects may be able to expose the source project's secrets.
+ This attribute is not overridden by inheritance; instead it is
+ the intersection of all applicable parents and variants (i.e.,
+ jobs can reduce but not expand the set of allowed projects when
+ they inherit).
+
.. warning::
It is possible to circumvent the use of `allowed-projects` in
@@ -906,6 +946,44 @@ Here is an example of two job definitions:
self-testing without requiring that the file matchers include
the Zuul configuration file defining the job.
+ .. attr:: deduplicate
+ :default: auto
+
+ In the case of a dependency cycle where multiple changes within
+ the cycle run the same job, this setting indicates whether Zuul
+ should attempt to deduplicate the job. If it is deduplicated,
+ then the job will only run for one queue item within the cycle
+ and other items which run the same job will use the results of
+ that build.
+
+    This setting determines whether Zuul will consider deduplication.
+ If it is set to ``false``, Zuul will never attempt to
+ deduplicate the job. If it is set to ``auto`` (the default),
+ then Zuul will compare the job with other jobs of other queue
+ items in the dependency cycle, and if they are equivalent and
+ meet certain project criteria, it will deduplicate them.
+
+ The project criteria that Zuul considers under the ``auto``
+ setting are either:
+
+ * The job must specify :attr:`job.required-projects`.
+ * Or the queue items must be for the same project.
+
+ This is because of the following heuristic: if a job specifies
+ :attr:`job.required-projects`, it is most likely to be one which
+ operates in the same way regardless of which project the change
+ under test belongs to, therefore the result of the same job
+ running on two queue items in the same dependency cycle should
+ be the same. If a job does not specify
+ :attr:`job.required-projects` and runs with two different
+ projects under test, the outcome is likely different for those
+ two items.
+
+ If this is not true for a job (e.g., the job ignores the project
+ under test and interacts only with external resources)
+ :attr:`job.deduplicate` may be set to ``true`` to ignore the
+ heuristic and deduplicate anyway.
+
.. attr:: workspace-scheme
:default: golang
diff --git a/doc/source/config/project.rst b/doc/source/config/project.rst
index af5053295..301e0b08d 100644
--- a/doc/source/config/project.rst
+++ b/doc/source/config/project.rst
@@ -166,10 +166,6 @@ pipeline.
stanza; it may appear in secondary instances or even in a
:ref:`project-template` definition.
- Pipeline managers other than `dependent` do not use this
- attribute, however, it may still be used if
- :attr:`scheduler.relative_priority` is enabled.
-
.. note:: This attribute is not evaluated speculatively and
its setting shall be merged to be effective.
diff --git a/doc/source/config/semaphore.rst b/doc/source/config/semaphore.rst
index 74440ae79..91bfc9a4d 100644
--- a/doc/source/config/semaphore.rst
+++ b/doc/source/config/semaphore.rst
@@ -15,6 +15,10 @@ project as long as the value is the same. This is to aid in branch
maintenance, so that creating a new branch based on an existing branch
will not immediately produce a configuration error.
+Zuul also supports global semaphores (see :ref:`global_semaphore`)
+which may only be created by the Zuul administrator, but can be used
+to coordinate resources across multiple tenants.
+
Semaphores are never subject to dynamic reconfiguration. If the value
of a semaphore is changed, it will take effect only when the change
where it is updated is merged. However, Zuul will attempt to validate
diff --git a/doc/source/developer/model-changelog.rst b/doc/source/developer/model-changelog.rst
index efdf50e3d..0d4cb5077 100644
--- a/doc/source/developer/model-changelog.rst
+++ b/doc/source/developer/model-changelog.rst
@@ -79,3 +79,10 @@ Version 7
Playbook secret references are now either an integer
index into the job secret list, or a dict with a blob
store key. This affects schedulers and executors.
+
+Version 8
+---------
+
+:Prior Zuul version: 6.0.0
+:Description: Deduplicates jobs in dependency cycles. Affects
+ schedulers only.
diff --git a/doc/source/developer/zookeeper.rst b/doc/source/developer/zookeeper.rst
index c14047ad6..ee77f815f 100644
--- a/doc/source/developer/zookeeper.rst
+++ b/doc/source/developer/zookeeper.rst
@@ -414,6 +414,13 @@ This is a reference for object layout in Zookeeper.
An election to decide which scheduler will report system-wide stats
(such as total node requests).
+.. path:: zuul/global-semaphores/<semaphore>
+ :type: SemaphoreHandler
+
+ Represents a global semaphore (shared by multiple tenants).
+ Information about which builds hold the semaphore is stored in the
+ znode data.
+
.. path:: zuul/semaphores/<tenant>/<semaphore>
:type: SemaphoreHandler
diff --git a/doc/source/drivers/gerrit.rst b/doc/source/drivers/gerrit.rst
index 8d746d465..7d052f946 100644
--- a/doc/source/drivers/gerrit.rst
+++ b/doc/source/drivers/gerrit.rst
@@ -51,6 +51,12 @@ The supported options in ``zuul.conf`` connections are:
Fully qualified domain name of Gerrit server.
+ .. attr:: ssh_server
+
+ If SSH access to the Gerrit server should be via a different
+ hostname than web access, set this value to the hostname to use
+ for SSH connections.
+
.. attr:: canonical_hostname
The canonical hostname associated with the git repos on the
diff --git a/doc/source/drivers/github.rst b/doc/source/drivers/github.rst
index 42a56bf2a..9ca7a1f38 100644
--- a/doc/source/drivers/github.rst
+++ b/doc/source/drivers/github.rst
@@ -182,6 +182,37 @@ The supported options in ``zuul.conf`` connections are:
Enable or disable GitHub rate limit logging. If rate limiting is disabled
in GitHub Enterprise this can save some network round trip times.
+ .. attr:: repo_cache
+
+ To configure Zuul to use a GitHub Enterprise `repository cache
+ <https://docs.github.com/en/enterprise-server@3.3/admin/enterprise-management/caching-repositories/about-repository-caching>`_
+ set this value to the hostname of the cache (e.g.,
+ ``europe-ci.github.example.com``). Zuul will fetch commits as
+ well as determine the global repo state of repositories used in
+ jobs from this host.
+
+ This setting is incompatible with :attr:`<github
+ connection>.sshkey`.
+
+ Because the repository cache may be several minutes behind the
+ canonical site, enabling this setting automatically sets the
+ default :attr:`<github connection>.repo_retry_timeout` to 600
+      seconds.  That setting may still be overridden to specify a
+ different value.
+
+ .. attr:: repo_retry_timeout
+
+ This setting is only used if :attr:`<github
+ connection>.repo_cache` is set. It specifies the amount of time
+ in seconds that Zuul mergers and executors should spend
+ attempting to fetch git commits which are not available from the
+ GitHub repository cache host.
+
+ When :attr:`<github connection>.repo_cache` is set, this value
+ defaults to 600 seconds, but it can be overridden. Zuul retries
+ git fetches every 30 seconds, and this value will be rounded up
+ to the next highest multiple of 30 seconds.
+
Trigger Configuration
---------------------
GitHub webhook events can be configured as triggers.
diff --git a/doc/source/gating.rst b/doc/source/gating.rst
index e6b4fed47..325313b54 100644
--- a/doc/source/gating.rst
+++ b/doc/source/gating.rst
@@ -405,7 +405,21 @@ change in project C.
Cycles
~~~~~~
-If a cycle is created by use of cross-project dependencies, Zuul will
-abort its work very early. There will be no message in Gerrit and no
-changes that are part of the cycle will be enqueued into any pipeline.
-This is to protect Zuul from infinite loops.
+Zuul supports cycles that are created by use of cross-project dependencies.
+However this feature is opt-in and can be configured on the queue.
+See :attr:`queue.allow-circular-dependencies` for information on how to
+configure this.
+
+.. _global_repo_state:
+
+Global Repo State
+~~~~~~~~~~~~~~~~~
+
+If a git repository is used by at least one job for a queue item, then
+Zuul will freeze the repo state (i.e., branch heads and tags) and use
+that same state for every job run for that queue item. Not every job
+will get a git repo checkout of every repo, but for any repo that is
+checked out, it will have the same state. Because of this, authors
+can be sure that jobs running on the same queue item have a consistent
+view of all involved git repos, even if one job starts running much
+later than another.
diff --git a/doc/source/howtos/openid-with-keycloak.rst b/doc/source/howtos/openid-with-keycloak.rst
index e80376801..74d3a27c0 100644
--- a/doc/source/howtos/openid-with-keycloak.rst
+++ b/doc/source/howtos/openid-with-keycloak.rst
@@ -10,7 +10,7 @@ Prerequisites
* The Zuul instance must be able to query Keycloak over HTTPS.
* Authenticating users must be able to reach Keycloak's web UI.
* Have a realm set up in Keycloak.
- `Instructions on how to do so can be found here <https://www.keycloak.org/docs/latest/getting_started/index.html#creating-a-realm-and-user>`_ .
+ `Instructions on how to do so can be found here <https://www.keycloak.org/docs/latest/server_admin/#configuring-realms>`_ .
By convention, we will assume the Keycloak server's FQDN is ``keycloak``, and
Zuul's Web UI's base URL is ``https://zuul/``. We will use the realm ``my_realm``.
@@ -81,7 +81,7 @@ Keycloak can delegate authentication to predefined social networks. Follow
If you don't set up authentication delegation, make sure to create at least one
user in your realm, or allow self-registration. See Keycloak's documentation section
-on `user management <https://www.keycloak.org/docs/latest/server_admin/index.html#user-management>`_
+on `user management <https://www.keycloak.org/docs/latest/server_admin/index.html#assembly-managing-users_server_administration_guide>`_
for more details on how to do so.
Setting up Zuul
@@ -107,4 +107,4 @@ Further Reading
---------------
This How-To is based on `Keycloak's documentation <https://www.keycloak.org/documentation.html>`_,
-specifically `the documentation about clients <https://www.keycloak.org/docs/latest/server_admin/#_clients>`_.
+specifically `the documentation about clients <https://www.keycloak.org/docs/latest/server_admin/#assembly-managing-clients_server_administration_guide>`_.
diff --git a/doc/source/tenants.rst b/doc/source/tenants.rst
index 7835eef46..fbbb458a5 100644
--- a/doc/source/tenants.rst
+++ b/doc/source/tenants.rst
@@ -211,6 +211,66 @@ configuration. Some examples of tenant definitions are:
exclude-unprotected-branches. This currently only affects
GitHub and GitLab projects.
+ .. attr:: include-branches
+
+ A list of regexes matching branches which should be
+ processed. If omitted, all branches are included.
+ Operates after *exclude-unprotected-branches* and so may
+ be used to further reduce the set of branches (but not
+ increase it).
+
+ It has priority over *exclude-branches*.
+
+ .. attr:: exclude-branches
+
+         A list of regexes matching branches which should not be
+         processed.  If omitted, no branches are excluded.
+ Operates after *exclude-unprotected-branches* and so may
+ be used to further reduce the set of branches (but not
+ increase it).
+
+ It will not exclude a branch which already matched
+ *include-branches*.
+
+ .. attr:: always-dynamic-branches
+
+ A list of regular expressions matching branches which
+ should be treated as if every change newly proposes
+ dynamic Zuul configuration. In other words, the only time
+ Zuul will realize any configuration related to these
+ branches is during the time it is running jobs for a
+ proposed change.
+
+ This is potentially useful for situations with large
+ numbers of rarely used feature branches, but comes at the
+ cost of a significant reduction in Zuul features for these
+ branches.
+
+ Every regular expression listed here will also implicitly
+ be included in *exclude-branches*, therefore Zuul will not
+ load any static in-repo configuration from this branch.
+ These branches will not be available for use in overriding
+ checkouts of repos, nor will they be included in the git
+ repos that Zuul prepares for *required-projects* (unless
+ there is a change in the dependency tree for this branch).
+
+ In particular, this means that the only jobs which can be
+ specified for these branches are pre-merge and gating jobs
+ (such as :term:`check` and :term:`gate`). No post-merge
+ or periodic jobs will run for these branches.
+
+ Using this setting also incurs additional processing for
+ each change submitted for these branches as Zuul must
+ recalculate the configuration layout it uses for such a
+ change as if it included a change to a ``zuul.yaml`` file,
+         even if the change does not alter the configuration.
+
+ With all these caveats in mind, this can be useful for
+ repos with large numbers of rarely used branches as it
+ allows Zuul to omit their configuration in most
+ circumstances and only calculate the configuration of a
+ single additional branch when it is used.
+
.. attr:: extra-config-paths
Normally Zuul loads in-repo configuration from the first
@@ -363,6 +423,57 @@ configuration. Some examples of tenant definitions are:
to add finer filtering to admin rules, for example filtering by the ``iss``
claim (generally equal to the issuer ID).
+ .. attr:: semaphores
+
+ A list of names of :attr:`global-semaphore` objects to allow
+ jobs in this tenant to access.
+
+.. _global_semaphore:
+
+Global Semaphore
+----------------
+
+Semaphores are normally defined in in-repo configuration (see
+:ref:`semaphore`), however to support use-cases where semaphores are
+used to represent constrained global resources that may be used by
+multiple Zuul tenants, semaphores may be defined within the main
+tenant configuration file.
+
+In order for a job to use a global semaphore, the semaphore must first
+be defined in the tenant configuration file with
+:attr:`global-semaphore` and then added to each tenant which should
+have access to it with :attr:`tenant.semaphores`. Once that is done,
+Zuul jobs may use that semaphore in the same way they would use a
+normal tenant-scoped semaphore.
+
+If any tenant which is granted access to a global semaphore also has a
+tenant-scoped semaphore defined with the same name, that definition
+will be treated as a configuration error and subsequently ignored in
+favor of the global semaphore.
+
+An example definition looks similar to the normal semaphore object:
+
+.. code-block:: yaml
+
+ - global-semaphore:
+ name: global-semaphore-foo
+ max: 5
+
+.. attr:: global-semaphore
+
+ The following attributes are available:
+
+ .. attr:: name
+ :required:
+
+ The name of the semaphore, referenced by jobs.
+
+ .. attr:: max
+ :default: 1
+
+ The maximum number of running jobs which can use this semaphore.
+
+
.. _admin_rule_definition:
Access Rule
diff --git a/playbooks/tutorial/quick-start.yaml b/playbooks/tutorial/quick-start.yaml
index 8f114f16f..872b0e689 100644
--- a/playbooks/tutorial/quick-start.yaml
+++ b/playbooks/tutorial/quick-start.yaml
@@ -84,7 +84,7 @@
- name: Find the build URL with regex
set_fact:
- build_url: "{{ result_json | to_json | from_json | json_query(json_query_log_url) | regex_search('(http://[^ ]*)') }}"
+ build_url: "{{ result_json | to_json | from_json | json_query(json_query_log_url) | regex_search('-.*(http://[^ ]*)') | regex_search('(http://[^ ]*)') }}"
vars:
json_query_log_url: "messages[?contains(@.message, 'http://')].message | [1]"
diff --git a/playbooks/zuul-stream/templates/ansible.cfg.j2 b/playbooks/zuul-stream/templates/ansible.cfg.j2
index c07f3c8ab..3638d2699 100644
--- a/playbooks/zuul-stream/templates/ansible.cfg.j2
+++ b/playbooks/zuul-stream/templates/ansible.cfg.j2
@@ -3,7 +3,7 @@ inventory = {{ ansible_user_dir }}/inventory.yaml
gathering = smart
gather_subset = !all
lookup_plugins = {{ ansible_user_dir }}/src/opendev.org/zuul/zuul/zuul/ansible/{{ zuul_ansible_version }}/lookup
-action_plugins = {{ ansible_user_dir }}/src/opendev.org/zuul/zuul/zuul/ansible/{{ zuul_ansible_version }}/actiongeneral:{{ ansible_user_dir }}/src/opendev.org/zuul/zuul/zuul/ansible/{{ zuul_ansible_version }}/action
+action_plugins = {{ ansible_user_dir }}/src/opendev.org/zuul/zuul/zuul/ansible/{{ zuul_ansible_version }}/action
callback_plugins = {{ ansible_user_dir }}/src/opendev.org/zuul/zuul/zuul/ansible/{{ zuul_ansible_version }}/callback
stdout_callback = zuul_stream
library = {{ ansible_user_dir }}/src/opendev.org/zuul/zuul/zuul/ansible/{{ zuul_ansible_version }}/library
diff --git a/releasenotes/notes/add-buildset-statusurl.yaml b/releasenotes/notes/add-buildset-statusurl.yaml
new file mode 100644
index 000000000..e69b701d1
--- /dev/null
+++ b/releasenotes/notes/add-buildset-statusurl.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+    Added the build set URL to the Zuul comment.  This provides a quick way
+    for users to reach the build set page from Gerrit. \ No newline at end of file
diff --git a/releasenotes/notes/always-dynamic-dce165ca8b6e212f.yaml b/releasenotes/notes/always-dynamic-dce165ca8b6e212f.yaml
new file mode 100644
index 000000000..0d0fe8226
--- /dev/null
+++ b/releasenotes/notes/always-dynamic-dce165ca8b6e212f.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Added a new
+ :attr:`tenant.untrusted-projects.<project>.always-dynamic-branches`
+ tenant project configuration option. This may be used to specify
+ branches from which Zuul should never load static configuration
+ and instead treat every change as if it newly proposed dynamic
+ configuration. This is potentially useful for large numbers of
+ rarely-used feature branches.
diff --git a/releasenotes/notes/ansible-5-0c9d6626294579e0.yaml b/releasenotes/notes/ansible-5-0c9d6626294579e0.yaml
index 4d8e8fce6..6c57af601 100644
--- a/releasenotes/notes/ansible-5-0c9d6626294579e0.yaml
+++ b/releasenotes/notes/ansible-5-0c9d6626294579e0.yaml
@@ -25,3 +25,8 @@ upgrade:
therefore the latest available micro-version will be installed at
build-time. If you need more control over the version of Ansible
used, see the help text for ``zuul-manage-ansible``.
+
+ - |
+ Python 3.8 or newer is required to run Zuul. This change was necessary
+ to support Ansible 5 and newer as Ansible's minimum python requirement
+ is 3.8.
diff --git a/releasenotes/notes/cherry-pick-merges-9c78fd914b682671.yaml b/releasenotes/notes/cherry-pick-merges-9c78fd914b682671.yaml
new file mode 100644
index 000000000..d5f1d14c5
--- /dev/null
+++ b/releasenotes/notes/cherry-pick-merges-9c78fd914b682671.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ The `cherry-pick` merger mode can now handle merges between branches by
+ performing a `git merge` instead of `git cherry-pick` if the change has
+ multiple parents. Previously, this would fail because git doesn't allow
+ a merge to be cherry-picked.
diff --git a/releasenotes/notes/deduplicate-ac171d3206eb43b3.yaml b/releasenotes/notes/deduplicate-ac171d3206eb43b3.yaml
new file mode 100644
index 000000000..d55ed8fcd
--- /dev/null
+++ b/releasenotes/notes/deduplicate-ac171d3206eb43b3.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ If identical jobs are run for multiple changes in a dependency
+ cycle, Zuul may now deduplicate them under certain circumstances.
+ See :attr:`job.deduplicate` for details.
diff --git a/releasenotes/notes/dependency-validation-000f63204da83b4a.yaml b/releasenotes/notes/dependency-validation-000f63204da83b4a.yaml
new file mode 100644
index 000000000..0f88a06fa
--- /dev/null
+++ b/releasenotes/notes/dependency-validation-000f63204da83b4a.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+ - |
+ Zuul now treats job dependencies that reference undefined jobs as
+ a configuration error. Previously a job which depended on another
+ job which did not exist would pass initial syntax validation and
+ only cause a failure in freezing the job graph when Zuul attempted
+ to run the job. Now incorrect or missing job dependencies are
+ detected during configuration. This means that new config errors
+ may be prevented from merging. It also means that existing
+ erroneous job or project configurations will be regarded as
+ configuration errors at startup.
diff --git a/releasenotes/notes/gearmanectomy-ecec32b3764eafc1.yaml b/releasenotes/notes/gearmanectomy-ecec32b3764eafc1.yaml
index 7985d030b..f5aaa617b 100644
--- a/releasenotes/notes/gearmanectomy-ecec32b3764eafc1.yaml
+++ b/releasenotes/notes/gearmanectomy-ecec32b3764eafc1.yaml
@@ -8,4 +8,4 @@ upgrade:
Commands such as `zuul enqueue` which were previously executed via
Gearman may still be used by creating an authentication token and
- adding a `web` section to zuul.conf.
+ adding a `webclient` section to zuul.conf.
diff --git a/releasenotes/notes/gerrit-ssh-server-a183363bac9d7ff8.yaml b/releasenotes/notes/gerrit-ssh-server-a183363bac9d7ff8.yaml
new file mode 100644
index 000000000..9e14364e5
--- /dev/null
+++ b/releasenotes/notes/gerrit-ssh-server-a183363bac9d7ff8.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The new `ssh_server` option for gerrit connections may be used to specify
+ a hostname to use for SSH connections while the normal `server` option
+ specifies the hostname to use for HTTP connections.
diff --git a/releasenotes/notes/global-semaphores-719828567158982a.yaml b/releasenotes/notes/global-semaphores-719828567158982a.yaml
new file mode 100644
index 000000000..2d68fb1dc
--- /dev/null
+++ b/releasenotes/notes/global-semaphores-719828567158982a.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Support for global (cross-tenant) semaphores has been added. See
+ :ref:`global_semaphore`.
diff --git a/releasenotes/notes/include-branches-493b6bb04414341b.yaml b/releasenotes/notes/include-branches-493b6bb04414341b.yaml
new file mode 100644
index 000000000..1121c991b
--- /dev/null
+++ b/releasenotes/notes/include-branches-493b6bb04414341b.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ Added new tenant project configuration options
+    :attr:`tenant.untrusted-projects.<project>.include-branches` and
+    :attr:`tenant.untrusted-projects.<project>.exclude-branches`.
+ Similar to *exclude-unprotected-branches*, these may be used to
+ reduce the set of branches from which Zuul will load
+ configuration.
diff --git a/releasenotes/notes/prune-database-eca38464b9606254.yaml b/releasenotes/notes/prune-database-eca38464b9606254.yaml
new file mode 100644
index 000000000..85322c185
--- /dev/null
+++ b/releasenotes/notes/prune-database-eca38464b9606254.yaml
@@ -0,0 +1,21 @@
+---
+features:
+ - |
+ A new command, ``prune-database`` has been added to `zuul-admin`
+ in order to remove database entries older than a certain age.
+upgrade:
+ - |
+ A SQL schema migration is added in order to add an ``updated``
+ column to the buildset table which will be used by the
+ `prune-database` command to determine which buildsets to delete.
+ The migration will attempt to use the most recent timestamp
+ associated with the buildset when initializing this column,
+ however, if no timestamps are available, it will initialize it to
+ 1970-01-01. Since this is considerably before Zuul's birthdate,
+ this means that any buildsets without timestamp information will
+ be pruned the first time `prune-database` is run. It is expected
+ that buildsets with no timestamps, even very recent ones, are
+ typically uninteresting and therefore this should not cause a
+ hardship. If this is not the case for your installation, you may
+ want to inspect the database and change the ``updated`` column to
+ a more recent value in these cases.
diff --git a/releasenotes/notes/repo_cache-5524b183a8bc3e6a.yaml b/releasenotes/notes/repo_cache-5524b183a8bc3e6a.yaml
new file mode 100644
index 000000000..e3cbd737e
--- /dev/null
+++ b/releasenotes/notes/repo_cache-5524b183a8bc3e6a.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ The GitHub driver now has support for using a GitHub Enterprise
+ `repository cache
+ <https://docs.github.com/en/enterprise-server@3.3/admin/enterprise-management/caching-repositories/about-repository-caching>`_.
+ See :attr:`<github connection>.repo_cache` for information on how
+ to configure it.
diff --git a/releasenotes/notes/wait-for-init-934370422b22b442.yaml b/releasenotes/notes/wait-for-init-934370422b22b442.yaml
new file mode 100644
index 000000000..9033131f1
--- /dev/null
+++ b/releasenotes/notes/wait-for-init-934370422b22b442.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ The scheduler now accepts an argument `--wait-for-init` which will
+ cause it to wait until all tenants have been initialized before it
+ begins processing pipelines. This may help large systems with
+ excess scheduler capacity perform a rolling restart of schedulers
+ more quickly.
diff --git a/releasenotes/notes/zuul-admin-cli-53f52cff1aad986c.yaml b/releasenotes/notes/zuul-admin-cli-53f52cff1aad986c.yaml
new file mode 100644
index 000000000..03c237ed6
--- /dev/null
+++ b/releasenotes/notes/zuul-admin-cli-53f52cff1aad986c.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - |
+ The zuul CLI is renamed `zuul-admin`. The `zuul` command will remain usable until a future version,
+ then will be phased out. Likewise, tenant-scoped, workflow affecting commands such as autohold,
+ enqueue, dequeue, promote are deprecated from the zuul-admin CLI and will be phased out in a
+ future version. They can still be performed via the `zuul-client` CLI.
diff --git a/setup.cfg b/setup.cfg
index 207b5bbb7..382e12019 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,7 +6,7 @@ description-file =
author = Zuul Team
author-email = zuul-discuss@lists.zuul-ci.org
home-page = https://zuul-ci.org/
-python-requires = >=3.6
+python-requires = >=3.8
classifier =
Intended Audience :: Information Technology
Intended Audience :: System Administrators
@@ -30,6 +30,7 @@ console_scripts =
zuul-scheduler = zuul.cmd.scheduler:main
zuul-merger = zuul.cmd.merger:main
zuul = zuul.cmd.client:main
+ zuul-admin = zuul.cmd.client:main
zuul-executor = zuul.cmd.executor:main
zuul-bwrap = zuul.driver.bubblewrap:main
zuul-web = zuul.cmd.web:main
diff --git a/tests/base.py b/tests/base.py
index 2e5421dd6..b72bf8380 100644
--- a/tests/base.py
+++ b/tests/base.py
@@ -333,11 +333,9 @@ class GitlabDriverMock(GitlabDriver):
class TestConnectionRegistry(ConnectionRegistry):
- def __init__(self, changes: Dict[str, Dict[str, Change]],
- config: ConfigParser, additional_event_queues,
- upstream_root: str, poller_events,
- git_url_with_auth: bool,
- add_cleanup: Callable[[Callable[[], None]], None]):
+ def __init__(self, changes, config, additional_event_queues,
+ upstream_root, poller_events, git_url_with_auth,
+ add_cleanup):
self.connections = OrderedDict()
self.drivers = {}
@@ -494,7 +492,8 @@ class FakeGerritChange(object):
repo.git.clean('-x', '-f', '-d')
repo.index.merge_tree(parents[1])
- r = repo.index.commit(msg)
+ parent_commits = [repo.commit(p) for p in parents]
+ r = repo.index.commit(msg, parent_commits=parent_commits)
repo.head.reference = 'master'
repo.head.reset(working_tree=True)
@@ -1036,6 +1035,10 @@ class GerritWebServer(object):
self.send_response(404)
self.end_headers()
+ def _409(self):
+ self.send_response(409)
+ self.end_headers()
+
def _get_change(self, change_id):
change_id = urllib.parse.unquote(change_id)
project, branch, change = change_id.split('~')
@@ -1061,7 +1064,7 @@ class GerritWebServer(object):
tag = data.get('tag', None)
fake_gerrit._test_handle_review(
int(change.data['number']), message, False, labels,
- comments, tag=tag)
+ True, False, comments, tag=tag)
self.send_response(200)
self.end_headers()
@@ -1070,10 +1073,26 @@ class GerritWebServer(object):
if not change:
return self._404()
+ candidate = self._get_change(change_id)
+ sr = candidate.getSubmitRecords()
+ if sr[0]['status'] != 'OK':
+ # One of the changes in this topic isn't
+ # ready to merge
+ return self._409()
+ if fake_gerrit._fake_submit_whole_topic:
+ results = fake_gerrit._test_get_submitted_together(change)
+ for record in results:
+ candidate = self._get_change(record['id'])
+ sr = candidate.getSubmitRecords()
+ if sr[0]['status'] != 'OK':
+ # One of the changes in this topic isn't
+ # ready to merge
+ return self._409()
message = None
labels = {}
fake_gerrit._test_handle_review(
- int(change.data['number']), message, True, labels)
+ int(change.data['number']), message, True, labels,
+ False, True)
self.send_response(200)
self.end_headers()
@@ -1148,19 +1167,8 @@ class GerritWebServer(object):
change = fake_gerrit.changes.get(int(number))
if not change:
return self._404()
- topic = change.data.get('topic')
- if not fake_gerrit._fake_submit_whole_topic:
- topic = None
- if topic:
- results = fake_gerrit._simpleQuery(
- f'topic:{topic}', http=True)
- else:
- results = []
- for dep in change.data.get('dependsOn', []):
- dep_change = fake_gerrit.changes.get(int(dep['number']))
- r = dep_change.queryHTTP(internal=True)
- if r not in results:
- results.append(r)
+
+ results = fake_gerrit._test_get_submitted_together(change)
self.send_data(results)
self.end_headers()
@@ -1379,16 +1387,33 @@ class FakeGerritConnection(gerritconnection.GerritConnection):
return event
def review(self, item, message, submit, labels, checks_api, file_comments,
- zuul_event_id=None):
+ phase1, phase2, zuul_event_id=None):
if self.web_server:
return super(FakeGerritConnection, self).review(
item, message, submit, labels, checks_api, file_comments,
- zuul_event_id)
+ phase1, phase2, zuul_event_id)
self._test_handle_review(int(item.change.number), message, submit,
- labels)
+ labels, phase1, phase2)
+
+ def _test_get_submitted_together(self, change):
+ topic = change.data.get('topic')
+ if not self._fake_submit_whole_topic:
+ topic = None
+ if topic:
+ results = self._simpleQuery(f'topic:{topic}', http=True)
+ else:
+ results = [change.queryHTTP(internal=True)]
+ for dep in change.data.get('dependsOn', []):
+ dep_change = self.changes.get(int(dep['number']))
+ r = dep_change.queryHTTP(internal=True)
+ if r not in results:
+ results.append(r)
+ if len(results) == 1:
+ return []
+ return results
def _test_handle_review(self, change_number, message, submit, labels,
- file_comments=None, tag=None):
+ phase1, phase2, file_comments=None, tag=None):
# Handle a review action from a test
change = self.changes[change_number]
@@ -1402,24 +1427,25 @@ class FakeGerritConnection(gerritconnection.GerritConnection):
# happens they can add their own verified event into the queue.
# Nevertheless, we can update change with the new review in gerrit.
- for cat in labels:
- change.addApproval(cat, labels[cat], username=self.user,
- tag=tag)
-
- if message:
- change.messages.append(message)
-
- if file_comments:
- for filename, commentlist in file_comments.items():
- for comment in commentlist:
- change.addComment(filename, comment['line'],
- comment['message'], 'Zuul',
- 'zuul@example.com', self.user,
- comment.get('range'))
- if submit:
+ if phase1:
+ for cat in labels:
+ change.addApproval(cat, labels[cat], username=self.user,
+ tag=tag)
+
+ if message:
+ change.messages.append(message)
+
+ if file_comments:
+ for filename, commentlist in file_comments.items():
+ for comment in commentlist:
+ change.addComment(filename, comment['line'],
+ comment['message'], 'Zuul',
+ 'zuul@example.com', self.user,
+ comment.get('range'))
+ if message:
+ change.setReported()
+ if submit and phase2:
change.setMerged()
- if message:
- change.setReported()
def queryChangeSSH(self, number, event=None):
self.log.debug("Query change SSH: %s", number)
@@ -2104,6 +2130,7 @@ class FakeGitlabMergeRequest(object):
self.state = 'opened'
self.is_merged = False
self.merge_status = 'can_be_merged'
+ self.squash_merge = None
self.labels = []
self.notes = []
self.url = "https://%s/%s/merge_requests/%s" % (
@@ -2142,9 +2169,10 @@ class FakeGitlabMergeRequest(object):
self.state = 'closed'
self._updateTimeStamp()
- def mergeMergeRequest(self):
+ def mergeMergeRequest(self, squash=None):
self.state = 'merged'
self.is_merged = True
+ self.squash_merge = squash
self._updateTimeStamp()
self.merged_at = self.updated_at
@@ -2952,6 +2980,8 @@ class FakeGithubConnection(githubconnection.GithubConnection):
prefix = 'file://x-access-token:%s@' % auth_token
else:
prefix = ''
+ if self.repo_cache:
+ return prefix + os.path.join(self.repo_cache, str(project))
return prefix + os.path.join(self.upstream_root, str(project))
def real_getGitUrl(self, project):
@@ -3119,6 +3149,8 @@ class FakeBuild(object):
result = (RecordingAnsibleJob.RESULT_NORMAL, 0) # Success
if self.shouldFail():
result = (RecordingAnsibleJob.RESULT_NORMAL, 1) # Failure
+ if self.shouldRetry():
+ result = (RecordingAnsibleJob.RESULT_NORMAL, None)
if self.aborted:
result = (RecordingAnsibleJob.RESULT_ABORTED, None)
if self.requeue:
@@ -3133,6 +3165,17 @@ class FakeBuild(object):
return True
return False
+ def shouldRetry(self):
+ entries = self.executor_server.retry_tests.get(self.name, [])
+ for entry in entries:
+ if self.hasChanges(entry['change']):
+ if entry['retries'] is None:
+ return True
+ if entry['retries']:
+ entry['retries'] = entry['retries'] - 1
+ return True
+ return False
+
def writeReturnData(self):
changes = self.executor_server.return_data.get(self.name, {})
data = changes.get(self.change)
@@ -3478,6 +3521,7 @@ class RecordingExecutorServer(zuul.executor.server.ExecutorServer):
self.running_builds = []
self.build_history = []
self.fail_tests = {}
+ self.retry_tests = {}
self.return_data = {}
self.job_builds = {}
@@ -3494,6 +3538,19 @@ class RecordingExecutorServer(zuul.executor.server.ExecutorServer):
l.append(change)
self.fail_tests[name] = l
+ def retryJob(self, name, change, retries=None):
+ """Instruct the executor to report matching builds as retries.
+
+        :arg str name: The name of the job to retry.
+        :arg Change change: The :py:class:`~tests.base.FakeChange`
+            instance which should cause the job to be retried.  This job
+            will also be retried for changes depending on this change.
+
+ """
+ self.retry_tests.setdefault(name, []).append(
+ dict(change=change,
+ retries=retries))
+
def returnData(self, name, change, data):
"""Instruct the executor to return data for this build.
@@ -3634,6 +3691,7 @@ class FakeNodepool(object):
self.python_path = 'auto'
self.shell_type = None
self.connection_port = None
+ self.history = []
def stop(self):
self._running = False
@@ -3792,6 +3850,7 @@ class FakeNodepool(object):
if request['state'] != 'requested':
return
request = request.copy()
+ self.history.append(request)
oid = request['_oid']
del request['_oid']
@@ -4349,11 +4408,12 @@ class SchedulerTestApp:
def __init__(self, log, config, changes, additional_event_queues,
upstream_root, poller_events,
git_url_with_auth, add_cleanup, validate_tenants,
- instance_id):
+ wait_for_init, instance_id):
self.log = log
self.config = config
self.changes = changes
self.validate_tenants = validate_tenants
+ self.wait_for_init = wait_for_init
# Register connections from the config using fakes
self.connections = TestConnectionRegistry(
@@ -4367,7 +4427,8 @@ class SchedulerTestApp:
)
self.connections.configure(self.config)
- self.sched = TestScheduler(self.config, self.connections, self)
+ self.sched = TestScheduler(self.config, self.connections, self,
+ wait_for_init)
self.sched.log = logging.getLogger(f"zuul.Scheduler-{instance_id}")
self.sched._stats_interval = 1
@@ -4389,9 +4450,16 @@ class SchedulerTestApp:
else:
self.sched.validateTenants(self.config, validate_tenants)
- def fullReconfigure(self):
+ def fullReconfigure(self, command_socket=False):
try:
- self.sched.reconfigure(self.config)
+ if command_socket:
+ command_socket = self.sched.config.get(
+ 'scheduler', 'command_socket')
+ with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
+ s.connect(command_socket)
+ s.sendall('full-reconfigure\n'.encode('utf8'))
+ else:
+ self.sched.reconfigure(self.config)
except Exception:
self.log.exception("Reconfiguration failed:")
@@ -4408,21 +4476,30 @@ class SchedulerTestApp:
except Exception:
self.log.exception("Reconfiguration failed:")
- def tenantReconfigure(self, tenants):
+ def tenantReconfigure(self, tenants, command_socket=False):
try:
- self.sched.reconfigure(self.config, smart=False, tenants=tenants)
+ if command_socket:
+ command_socket = self.sched.config.get(
+ 'scheduler', 'command_socket')
+ args = json.dumps(tenants)
+ with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
+ s.connect(command_socket)
+ s.sendall(f'tenant-reconfigure {args}\n'.
+ encode('utf8'))
+ else:
+ self.sched.reconfigure(
+ self.config, smart=False, tenants=tenants)
except Exception:
self.log.exception("Reconfiguration failed:")
class SchedulerTestManager:
- def __init__(self, validate_tenants):
+ def __init__(self, validate_tenants, wait_for_init):
self.instances = []
- self.validate_tenants = validate_tenants
def create(self, log, config, changes, additional_event_queues,
- upstream_root, poller_events,
- git_url_with_auth, add_cleanup, validate_tenants):
+ upstream_root, poller_events, git_url_with_auth,
+ add_cleanup, validate_tenants, wait_for_init):
# Since the config contains a regex we cannot use copy.deepcopy()
# as this will raise an exception with Python <3.7
config_data = StringIO()
@@ -4443,7 +4520,7 @@ class SchedulerTestManager:
additional_event_queues, upstream_root,
poller_events,
git_url_with_auth, add_cleanup,
- validate_tenants, instance_id)
+ validate_tenants, wait_for_init, instance_id)
self.instances.append(app)
return app
@@ -4546,6 +4623,7 @@ class ZuulTestCase(BaseTestCase):
git_url_with_auth: bool = False
log_console_port: int = 19885
validate_tenants = None
+ wait_for_init = None
scheduler_count = SCHEDULER_COUNT
def __getattr__(self, name):
@@ -4710,7 +4788,8 @@ class ZuulTestCase(BaseTestCase):
self.history = self.executor_server.build_history
self.builds = self.executor_server.running_builds
- self.scheds = SchedulerTestManager(self.validate_tenants)
+ self.scheds = SchedulerTestManager(self.validate_tenants,
+ self.wait_for_init)
for _ in range(self.scheduler_count):
self.createScheduler()
@@ -4729,7 +4808,7 @@ class ZuulTestCase(BaseTestCase):
self.log, self.config, self.changes,
self.additional_event_queues, self.upstream_root,
self.poller_events, self.git_url_with_auth,
- self.addCleanup, self.validate_tenants)
+ self.addCleanup, self.validate_tenants, self.wait_for_init)
def createZKContext(self, lock=None):
if lock is None:
diff --git a/tests/fakegitlab.py b/tests/fakegitlab.py
index 1b166c855..c4706b3b1 100644
--- a/tests/fakegitlab.py
+++ b/tests/fakegitlab.py
@@ -233,7 +233,10 @@ class GitlabWebServer(object):
def put_mr_merge(self, data, project, mr):
mr = self._get_mr(project, mr)
- mr.mergeMergeRequest()
+ squash = None
+ if data and isinstance(data, dict):
+ squash = data.get('squash')
+ mr.mergeMergeRequest(squash)
self.send_data({'state': 'merged'})
def put_mr_update(self, data, project, mr):
diff --git a/tests/fixtures/config/circular-dependencies/git/common-config/zuul.yaml b/tests/fixtures/config/circular-dependencies/git/common-config/zuul.yaml
index 855e61007..44f26411c 100644
--- a/tests/fixtures/config/circular-dependencies/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/circular-dependencies/git/common-config/zuul.yaml
@@ -67,6 +67,7 @@
name: base
parent: null
run: playbooks/run.yaml
+ deduplicate: false
required-projects:
- common-config
- org/project
diff --git a/tests/fixtures/config/dynamic-only-project/dynamic.yaml b/tests/fixtures/config/dynamic-only-project/dynamic.yaml
new file mode 100644
index 000000000..d11451181
--- /dev/null
+++ b/tests/fixtures/config/dynamic-only-project/dynamic.yaml
@@ -0,0 +1,10 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project:
+ always-dynamic-branches:
+ - "^feature/.*"
diff --git a/tests/fixtures/config/dynamic-only-project/exclude.yaml b/tests/fixtures/config/dynamic-only-project/exclude.yaml
new file mode 100644
index 000000000..58a0b03f1
--- /dev/null
+++ b/tests/fixtures/config/dynamic-only-project/exclude.yaml
@@ -0,0 +1,10 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project:
+ exclude-branches:
+ - "^feature/.*"
diff --git a/tests/fixtures/config/dynamic-only-project/git/common-config/playbooks/run.yaml b/tests/fixtures/config/dynamic-only-project/git/common-config/playbooks/run.yaml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/tests/fixtures/config/dynamic-only-project/git/common-config/playbooks/run.yaml
@@ -0,0 +1 @@
+---
diff --git a/tests/fixtures/config/dynamic-only-project/git/common-config/zuul.yaml b/tests/fixtures/config/dynamic-only-project/git/common-config/zuul.yaml
new file mode 100644
index 000000000..331abc54e
--- /dev/null
+++ b/tests/fixtures/config/dynamic-only-project/git/common-config/zuul.yaml
@@ -0,0 +1,67 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ - event: comment-added
+ comment: '^(Patch Set [0-9]+:\n\n)?(?i:recheck)$'
+ success:
+ gerrit:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- pipeline:
+ name: post
+ manager: independent
+ trigger:
+ gerrit:
+ - event: ref-updated
+ ref: ^(?!refs/).*$
+ precedence: low
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+
+- job:
+ name: central-test
+
+- job:
+ name: central-post
+
+- project:
+ name: "^org/project.*"
+ check:
+ jobs:
+ - central-test
+ gate:
+ jobs:
+ - central-test
+ post:
+ jobs:
+ - central-post
diff --git a/tests/fixtures/config/dynamic-only-project/git/org_project/README b/tests/fixtures/config/dynamic-only-project/git/org_project/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/dynamic-only-project/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/dynamic-only-project/git/org_project/zuul.yaml b/tests/fixtures/config/dynamic-only-project/git/org_project/zuul.yaml
new file mode 100644
index 000000000..9712c0828
--- /dev/null
+++ b/tests/fixtures/config/dynamic-only-project/git/org_project/zuul.yaml
@@ -0,0 +1,17 @@
+- job:
+ name: project-test
+
+# Note: this job is not expected to run
+- job:
+ name: project-post
+
+- project:
+ check:
+ jobs:
+ - project-test
+ gate:
+ jobs:
+ - project-test
+ post:
+ jobs:
+ - project-post
diff --git a/tests/fixtures/config/dynamic-only-project/include.yaml b/tests/fixtures/config/dynamic-only-project/include.yaml
new file mode 100644
index 000000000..315eccc54
--- /dev/null
+++ b/tests/fixtures/config/dynamic-only-project/include.yaml
@@ -0,0 +1,11 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project:
+ include-branches:
+ - master
+ - stable
diff --git a/tests/fixtures/config/global-semaphores-config/broken.yaml b/tests/fixtures/config/global-semaphores-config/broken.yaml
new file mode 100644
index 000000000..f85494370
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/broken.yaml
@@ -0,0 +1,10 @@
+- tenant:
+ name: tenant-two
+ semaphores:
+ - global-semaphore
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project2
diff --git a/tests/fixtures/config/global-semaphores-config/git/common-config/playbooks/run.yaml b/tests/fixtures/config/global-semaphores-config/git/common-config/playbooks/run.yaml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/git/common-config/playbooks/run.yaml
@@ -0,0 +1 @@
+---
diff --git a/tests/fixtures/config/global-semaphores-config/git/common-config/zuul.yaml b/tests/fixtures/config/global-semaphores-config/git/common-config/zuul.yaml
new file mode 100644
index 000000000..9dba55571
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/git/common-config/zuul.yaml
@@ -0,0 +1,52 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ - event: comment-added
+ comment: '^(Patch Set [0-9]+:\n\n)?(?i:recheck)$'
+ success:
+ gerrit:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+
+- semaphore:
+ name: common-semaphore
+ max: 10
+
+- job:
+ name: test-global-semaphore
+ semaphores: global-semaphore
+
+- job:
+ name: test-common-semaphore
+ semaphores: common-semaphore
diff --git a/tests/fixtures/config/global-semaphores-config/git/org_project1/README b/tests/fixtures/config/global-semaphores-config/git/org_project1/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/git/org_project1/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/global-semaphores-config/git/org_project1/zuul.yaml b/tests/fixtures/config/global-semaphores-config/git/org_project1/zuul.yaml
new file mode 100644
index 000000000..a4d823b1b
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/git/org_project1/zuul.yaml
@@ -0,0 +1,19 @@
+# Not actually the global semaphore -- this will be overridden
+- semaphore:
+ name: global-semaphore
+ max: 2
+
+- semaphore:
+ name: project1-semaphore
+ max: 11
+
+- job:
+ name: test-project1-semaphore
+ semaphores: project1-semaphore
+
+- project:
+ check:
+ jobs:
+ - test-global-semaphore
+ - test-common-semaphore
+ - test-project1-semaphore
diff --git a/tests/fixtures/config/global-semaphores-config/git/org_project2/README b/tests/fixtures/config/global-semaphores-config/git/org_project2/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/git/org_project2/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/global-semaphores-config/git/org_project2/zuul.yaml b/tests/fixtures/config/global-semaphores-config/git/org_project2/zuul.yaml
new file mode 100644
index 000000000..a0cdb39b9
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/git/org_project2/zuul.yaml
@@ -0,0 +1,14 @@
+- semaphore:
+ name: project2-semaphore
+ max: 12
+
+- job:
+ name: test-project2-semaphore
+ semaphores: project2-semaphore
+
+- project:
+ check:
+ jobs:
+ - test-global-semaphore
+ - test-common-semaphore
+ - test-project2-semaphore
diff --git a/tests/fixtures/config/global-semaphores-config/git/org_project3/README b/tests/fixtures/config/global-semaphores-config/git/org_project3/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/git/org_project3/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/global-semaphores-config/git/org_project3/zuul.yaml b/tests/fixtures/config/global-semaphores-config/git/org_project3/zuul.yaml
new file mode 100644
index 000000000..44a60a004
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/git/org_project3/zuul.yaml
@@ -0,0 +1,20 @@
+# Not actually the global semaphore -- this tenant doesn't have it, so
+# this semaphore will be used.
+- semaphore:
+ name: global-semaphore
+ max: 999
+
+- semaphore:
+ name: project3-semaphore
+ max: 13
+
+- job:
+ name: test-project3-semaphore
+ semaphores: project3-semaphore
+
+- project:
+ check:
+ jobs:
+ - test-global-semaphore
+ - test-common-semaphore
+ - test-project3-semaphore
diff --git a/tests/fixtures/config/global-semaphores-config/main.yaml b/tests/fixtures/config/global-semaphores-config/main.yaml
new file mode 100644
index 000000000..8cb74bc18
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores-config/main.yaml
@@ -0,0 +1,34 @@
+- global-semaphore:
+ name: global-semaphore
+ max: 100
+
+- tenant:
+ name: tenant-one
+ semaphores:
+ - global-semaphore
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project1
+
+- tenant:
+ name: tenant-two
+ semaphores:
+ - global-semaphore
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project2
+
+- tenant:
+ name: tenant-three
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project3
diff --git a/tests/fixtures/config/global-semaphores/git/common-config/playbooks/run.yaml b/tests/fixtures/config/global-semaphores/git/common-config/playbooks/run.yaml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores/git/common-config/playbooks/run.yaml
@@ -0,0 +1 @@
+---
diff --git a/tests/fixtures/config/global-semaphores/git/common-config/zuul.yaml b/tests/fixtures/config/global-semaphores/git/common-config/zuul.yaml
new file mode 100644
index 000000000..48b9e9c92
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores/git/common-config/zuul.yaml
@@ -0,0 +1,44 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ - event: comment-added
+ comment: '^(Patch Set [0-9]+:\n\n)?(?i:recheck)$'
+ success:
+ gerrit:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+
+- job:
+ name: test-global-semaphore
+ semaphores: global-semaphore
diff --git a/tests/fixtures/config/global-semaphores/git/org_project1/README b/tests/fixtures/config/global-semaphores/git/org_project1/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores/git/org_project1/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/global-semaphores/git/org_project1/zuul.yaml b/tests/fixtures/config/global-semaphores/git/org_project1/zuul.yaml
new file mode 100644
index 000000000..a05bdb237
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores/git/org_project1/zuul.yaml
@@ -0,0 +1,4 @@
+- project:
+ check:
+ jobs:
+ - test-global-semaphore
diff --git a/tests/fixtures/config/global-semaphores/git/org_project2/README b/tests/fixtures/config/global-semaphores/git/org_project2/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores/git/org_project2/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/global-semaphores/git/org_project2/zuul.yaml b/tests/fixtures/config/global-semaphores/git/org_project2/zuul.yaml
new file mode 100644
index 000000000..a05bdb237
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores/git/org_project2/zuul.yaml
@@ -0,0 +1,4 @@
+- project:
+ check:
+ jobs:
+ - test-global-semaphore
diff --git a/tests/fixtures/config/global-semaphores/main.yaml b/tests/fixtures/config/global-semaphores/main.yaml
new file mode 100644
index 000000000..719e3e12d
--- /dev/null
+++ b/tests/fixtures/config/global-semaphores/main.yaml
@@ -0,0 +1,25 @@
+- global-semaphore:
+ name: global-semaphore
+ max: 1
+
+- tenant:
+ name: tenant-one
+ semaphores:
+ - global-semaphore
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project1
+
+- tenant:
+ name: tenant-two
+ semaphores:
+ - global-semaphore
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project2
diff --git a/tests/fixtures/config/remote-zuul-stream/git/org_project/playbooks/command.yaml b/tests/fixtures/config/remote-zuul-stream/git/org_project/playbooks/command.yaml
index d6a53be74..d737a1a9b 100644
--- a/tests/fixtures/config/remote-zuul-stream/git/org_project/playbooks/command.yaml
+++ b/tests/fixtures/config/remote-zuul-stream/git/org_project/playbooks/command.yaml
@@ -89,3 +89,28 @@
- failed_in_loop1
- failed_in_loop2
ignore_errors: True
+
+# Try transitive includes two different ways
+- hosts: compute1
+ tasks:
+ - include_role:
+ name: include-echo-role
+ vars:
+ item: transitive-one
+ - include_role:
+ name: include-echo-role
+ vars:
+ item: transitive-two
+
+- hosts: compute1
+ roles:
+ - role: include-echo-role
+ item: transitive-three
+ - role: include-echo-role
+ item: transitive-four
+
+- hosts: compute1
+ tasks:
+ - name: Command Not Found
+ command: command-not-found
+ failed_when: false
diff --git a/tests/fixtures/config/remote-zuul-stream/git/org_project/roles/echo-role/tasks/main.yaml b/tests/fixtures/config/remote-zuul-stream/git/org_project/roles/echo-role/tasks/main.yaml
new file mode 100644
index 000000000..500329797
--- /dev/null
+++ b/tests/fixtures/config/remote-zuul-stream/git/org_project/roles/echo-role/tasks/main.yaml
@@ -0,0 +1,2 @@
+- name: Echo message
+ command: "echo {{item}}"
diff --git a/tests/fixtures/config/remote-zuul-stream/git/org_project/roles/include-echo-role/tasks/main.yaml b/tests/fixtures/config/remote-zuul-stream/git/org_project/roles/include-echo-role/tasks/main.yaml
new file mode 100644
index 000000000..79eade799
--- /dev/null
+++ b/tests/fixtures/config/remote-zuul-stream/git/org_project/roles/include-echo-role/tasks/main.yaml
@@ -0,0 +1,3 @@
+- name: Include echo role
+ include_role:
+ name: echo-role
diff --git a/tests/fixtures/config/tenant-parser/exclude-branches.yaml b/tests/fixtures/config/tenant-parser/exclude-branches.yaml
new file mode 100644
index 000000000..1efbae38f
--- /dev/null
+++ b/tests/fixtures/config/tenant-parser/exclude-branches.yaml
@@ -0,0 +1,12 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project1:
+ exclude-branches:
+ - master
+ - baz
+ - org/project2
diff --git a/tests/fixtures/config/tenant-parser/include-branches.yaml b/tests/fixtures/config/tenant-parser/include-branches.yaml
new file mode 100644
index 000000000..382926488
--- /dev/null
+++ b/tests/fixtures/config/tenant-parser/include-branches.yaml
@@ -0,0 +1,12 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project1:
+ include-branches:
+ - foo
+ - bar
+ - org/project2
diff --git a/tests/fixtures/layouts/circular-deps-node-failure.yaml b/tests/fixtures/layouts/circular-deps-node-failure.yaml
new file mode 100644
index 000000000..244449b82
--- /dev/null
+++ b/tests/fixtures/layouts/circular-deps-node-failure.yaml
@@ -0,0 +1,62 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: common-job
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/job-dedup-auto-shared.yaml b/tests/fixtures/layouts/job-dedup-auto-shared.yaml
new file mode 100644
index 000000000..26896dec8
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-auto-shared.yaml
@@ -0,0 +1,65 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: common-job
+ required-projects:
+ - org/project1
+ - org/project2
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/job-dedup-auto-unshared.yaml b/tests/fixtures/layouts/job-dedup-auto-unshared.yaml
new file mode 100644
index 000000000..244449b82
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-auto-unshared.yaml
@@ -0,0 +1,62 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: common-job
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/job-dedup-auto.yaml b/tests/fixtures/layouts/job-dedup-auto.yaml
new file mode 100644
index 000000000..f36f81136
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-auto.yaml
@@ -0,0 +1,61 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+
+- job:
+ name: common-job
+ required-projects:
+ - org/project1
+ - org/project2
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/job-dedup-empty-nodeset.yaml b/tests/fixtures/layouts/job-dedup-empty-nodeset.yaml
new file mode 100644
index 000000000..f36f81136
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-empty-nodeset.yaml
@@ -0,0 +1,61 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+
+- job:
+ name: common-job
+ required-projects:
+ - org/project1
+ - org/project2
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/job-dedup-false.yaml b/tests/fixtures/layouts/job-dedup-false.yaml
new file mode 100644
index 000000000..2c0e6ee2e
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-false.yaml
@@ -0,0 +1,66 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: common-job
+ deduplicate: false
+ required-projects:
+ - org/project1
+ - org/project2
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/job-dedup-parent-data.yaml b/tests/fixtures/layouts/job-dedup-parent-data.yaml
new file mode 100644
index 000000000..c88dbe3c6
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-parent-data.yaml
@@ -0,0 +1,81 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: parent-job
+ deduplicate: true
+
+- job:
+ name: forked-child-job
+ deduplicate: true
+
+- job:
+ name: common-child-job
+ deduplicate: true
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - parent-job
+ - common-child-job:
+ dependencies: parent-job
+ - project1-job:
+ dependencies: parent-job
+ - forked-child-job:
+ dependencies: project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - parent-job
+ - common-child-job:
+ dependencies: parent-job
+ - project2-job:
+ dependencies: parent-job
+ - forked-child-job:
+ dependencies: project2-job
diff --git a/tests/fixtures/layouts/job-dedup-retry-child.yaml b/tests/fixtures/layouts/job-dedup-retry-child.yaml
new file mode 100644
index 000000000..89d25db42
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-retry-child.yaml
@@ -0,0 +1,65 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: parent-job
+ deduplicate: true
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - parent-job
+ - project1-job:
+ dependencies: parent-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - parent-job
+ - project2-job:
+ dependencies: parent-job
diff --git a/tests/fixtures/layouts/job-dedup-retry.yaml b/tests/fixtures/layouts/job-dedup-retry.yaml
new file mode 100644
index 000000000..9cf963951
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-retry.yaml
@@ -0,0 +1,66 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ pre-run: playbooks/pre.yaml
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: common-job
+ required-projects:
+ - org/project1
+ - org/project2
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/job-dedup-semaphore-first.yaml b/tests/fixtures/layouts/job-dedup-semaphore-first.yaml
new file mode 100644
index 000000000..fe2dcce06
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-semaphore-first.yaml
@@ -0,0 +1,71 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- semaphore:
+ name: test-semaphore
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: common-job
+ semaphore:
+ name: test-semaphore
+ resources-first: true
+ required-projects:
+ - org/project1
+ - org/project2
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/job-dedup-semaphore.yaml b/tests/fixtures/layouts/job-dedup-semaphore.yaml
new file mode 100644
index 000000000..5d793a21f
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-semaphore.yaml
@@ -0,0 +1,70 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- semaphore:
+ name: test-semaphore
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: common-job
+ semaphore:
+ name: test-semaphore
+ required-projects:
+ - org/project1
+ - org/project2
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/job-dedup-true.yaml b/tests/fixtures/layouts/job-dedup-true.yaml
new file mode 100644
index 000000000..559ea5a15
--- /dev/null
+++ b/tests/fixtures/layouts/job-dedup-true.yaml
@@ -0,0 +1,63 @@
+- queue:
+ name: integrated
+ allow-circular-dependencies: true
+
+- pipeline:
+ name: gate
+ manager: dependent
+ success-message: Build succeeded (gate).
+ require:
+ gerrit:
+ approval:
+ - Approved: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Approved: 1
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ start:
+ gerrit:
+ Verified: 0
+ precedence: high
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/run.yaml
+ nodeset:
+ nodes:
+ - label: debian
+ name: controller
+
+- job:
+ name: common-job
+ deduplicate: true
+
+- job:
+ name: project1-job
+
+- job:
+ name: project2-job
+
+- project:
+ name: org/project1
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project1-job
+
+- project:
+ name: org/project2
+ queue: integrated
+ gate:
+ jobs:
+ - common-job
+ - project2-job
diff --git a/tests/fixtures/layouts/merging-gitlab-squash-merge.yaml b/tests/fixtures/layouts/merging-gitlab-squash-merge.yaml
new file mode 100644
index 000000000..bfc8de36b
--- /dev/null
+++ b/tests/fixtures/layouts/merging-gitlab-squash-merge.yaml
@@ -0,0 +1,28 @@
+- pipeline:
+ name: gate-merge
+ manager: dependent
+ trigger:
+ gitlab:
+ - event: gl_merge_request
+ action:
+ - opened
+ - changed
+ success:
+ gitlab:
+ merge: true
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/base.yaml
+
+- job:
+ name: project-test
+ run: playbooks/project-test.yaml
+
+- project:
+ name: org/project1
+ merge-mode: squash-merge
+ gate-merge:
+ jobs:
+ - project-test
diff --git a/tests/fixtures/layouts/sos-circular.yaml b/tests/fixtures/layouts/sos-circular.yaml
index 77667a478..79f8e8d7a 100644
--- a/tests/fixtures/layouts/sos-circular.yaml
+++ b/tests/fixtures/layouts/sos-circular.yaml
@@ -45,6 +45,7 @@
name: base
parent: null
run: playbooks/base.yaml
+ deduplicate: false
nodeset:
nodes:
- label: ubuntu-xenial
diff --git a/tests/fixtures/layouts/two-projects-integrated.yaml b/tests/fixtures/layouts/two-projects-integrated.yaml
index 45c726e4f..89302f62a 100644
--- a/tests/fixtures/layouts/two-projects-integrated.yaml
+++ b/tests/fixtures/layouts/two-projects-integrated.yaml
@@ -58,18 +58,26 @@
jobs:
- test
-- project:
- name: org/project1
+# Define jobs for org/project1 in a template to test that the queue name is
+# picked up correctly from the project config. This should also be the case
+# when the project doesn't include any pipeline config and only references a
+# template.
+- project-template:
+ name: project1-template
check:
- queue: integrated
jobs:
- integration
gate:
- queue: integrated
jobs:
- integration
- project:
+ name: org/project1
+ queue: integrated
+ templates:
+ - project1-template
+
+- project:
name: org/project2
check:
queue: integrated
diff --git a/tests/fixtures/zuul-gerrit-ssh.conf b/tests/fixtures/zuul-gerrit-ssh.conf
new file mode 100644
index 000000000..2f414032c
--- /dev/null
+++ b/tests/fixtures/zuul-gerrit-ssh.conf
@@ -0,0 +1,38 @@
+[statsd]
+# note, use 127.0.0.1 rather than localhost to avoid getting ipv6
+# see: https://github.com/jsocol/pystatsd/issues/61
+server=127.0.0.1
+
+[scheduler]
+tenant_config=main.yaml
+
+[merger]
+git_dir=/tmp/zuul-test/merger-git
+git_user_email=zuul@example.com
+git_user_name=zuul
+
+[executor]
+git_dir=/tmp/zuul-test/executor-git
+load_multiplier=100
+
+[connection gerrit]
+driver=gerrit
+server=review.example.com
+ssh_server=ssh-review.example.com
+user=jenkins
+sshkey=fake_id_rsa_path
+password=badpassword
+
+[connection github]
+driver=github
+webhook_token=0000000000000000000000000000000000000000
+
+[connection smtp]
+driver=smtp
+server=localhost
+port=25
+default_from=zuul@example.com
+default_to=you@example.com
+
+[database]
+dburi=$MYSQL_FIXTURE_DBURI$
diff --git a/tests/remote/test_remote_zuul_stream.py b/tests/remote/test_remote_zuul_stream.py
index ab8026d92..1f6b7fff7 100644
--- a/tests/remote/test_remote_zuul_stream.py
+++ b/tests/remote/test_remote_zuul_stream.py
@@ -15,6 +15,7 @@
import os
import re
import textwrap
+from datetime import datetime, timedelta
from tests.base import AnsibleZuulTestCase
@@ -23,11 +24,12 @@ class FunctionalZuulStreamMixIn:
tenant_config_file = 'config/remote-zuul-stream/main.yaml'
# This should be overriden in child classes.
ansible_version = '2.9'
- wait_timeout = 120
def _setUp(self):
self.log_console_port = 19000 + int(
self.ansible_core_version.split('.')[1])
+ self.executor_server.log_console_port = self.log_console_port
+ self.wait_timeout = 120
self.fake_nodepool.remote_ansible = True
ansible_remote = os.environ.get('ZUUL_REMOTE_IPV4')
@@ -98,6 +100,20 @@ class FunctionalZuulStreamMixIn:
if m is None:
raise Exception("'%s' not found in log" % (line,))
+ def _getLogTime(self, line, log):
+ pattern = (r'^(\d\d\d\d-\d\d-\d\d \d\d:\d\d\:\d\d\.\d\d\d\d\d\d)'
+ r' \| %s\n'
+ r'(\d\d\d\d-\d\d-\d\d \d\d:\d\d\:\d\d\.\d\d\d\d\d\d)'
+ % line)
+ log_re = re.compile(pattern, re.MULTILINE)
+ m = log_re.search(log)
+ if m is None:
+ raise Exception("'%s' not found in log" % (line,))
+ else:
+ date1 = datetime.strptime(m.group(1), "%Y-%m-%d %H:%M:%S.%f")
+ date2 = datetime.strptime(m.group(2), "%Y-%m-%d %H:%M:%S.%f")
+ return (date1, date2)
+
def test_command(self):
job = self._run_job('command')
with self.jobLog(job):
@@ -150,6 +166,10 @@ class FunctionalZuulStreamMixIn:
self.assertLogLine(r'compute1 \| failed_in_loop2', text)
self.assertLogLine(r'compute1 \| ok: Item: failed_in_loop2 '
r'Result: 1', text)
+ self.assertLogLine(r'compute1 \| transitive-one', text)
+ self.assertLogLine(r'compute1 \| transitive-two', text)
+ self.assertLogLine(r'compute1 \| transitive-three', text)
+ self.assertLogLine(r'compute1 \| transitive-four', text)
self.assertLogLine(
r'controller \| ok: Runtime: \d:\d\d:\d\d\.\d\d\d\d\d\d', text)
self.assertLogLine('PLAY RECAP', text)
@@ -161,6 +181,10 @@ class FunctionalZuulStreamMixIn:
self.assertLogLine(
r'RUN END RESULT_NORMAL: \[untrusted : review.example.com/'
r'org/project/playbooks/command.yaml@master]', text)
+ time1, time2 = self._getLogTime(r'TASK \[Command Not Found\]',
+ text)
+ self.assertLess((time2 - time1) / timedelta(milliseconds=1),
+ 9000)
def test_module_exception(self):
job = self._run_job('module_failure_exception')
diff --git a/tests/unit/test_circular_dependencies.py b/tests/unit/test_circular_dependencies.py
index be8004d85..315840dfd 100644
--- a/tests/unit/test_circular_dependencies.py
+++ b/tests/unit/test_circular_dependencies.py
@@ -17,7 +17,7 @@ import textwrap
from zuul.model import PromoteEvent
-from tests.base import ZuulTestCase, simple_layout
+from tests.base import ZuulTestCase, simple_layout, iterate_timeout
class TestGerritCircularDependencies(ZuulTestCase):
@@ -464,6 +464,45 @@ class TestGerritCircularDependencies(ZuulTestCase):
self.assertEqual(A.data["status"], "NEW")
self.assertEqual(B.data["status"], "NEW")
+ @simple_layout('layouts/circular-deps-node-failure.yaml')
+ def test_cycle_failed_node_request(self):
+ # Test a node request failure as part of a dependency cycle
+
+ # Pause nodepool so we can fail the node request later
+ self.fake_nodepool.pause()
+
+ A = self.fake_gerrit.addFakeChange("org/project1", "master", "A")
+ B = self.fake_gerrit.addFakeChange("org/project2", "master", "B")
+
+ # A <-> B (via commit-depends)
+ A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ A.subject, B.data["url"]
+ )
+ B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ B.subject, A.data["url"]
+ )
+
+ A.addApproval("Code-Review", 2)
+ B.addApproval("Code-Review", 2)
+ B.addApproval("Approved", 1)
+
+ self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
+ self.waitUntilSettled()
+
+ # Fail the node request and unpause
+ req = self.fake_nodepool.getNodeRequests()
+ self.fake_nodepool.addFailRequest(req[0])
+
+ self.fake_nodepool.unpause()
+ self.waitUntilSettled()
+
+ self.assertEqual(A.reported, 2)
+ self.assertEqual(B.reported, 2)
+ self.assertIn("bundle", A.messages[-1])
+ self.assertIn("bundle", B.messages[-1])
+ self.assertEqual(A.data["status"], "NEW")
+ self.assertEqual(B.data["status"], "NEW")
+
def test_failing_cycle_behind_failing_change(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange("org/project", "master", "A")
@@ -727,7 +766,7 @@ class TestGerritCircularDependencies(ZuulTestCase):
self.fake_gerrit.addEvent(A.addApproval("Approved", 1))
self.waitUntilSettled()
- self.assertEqual(A.reported, 2)
+ self.assertEqual(A.reported, 3)
self.assertEqual(B.reported, 3)
self.assertEqual(A.patchsets[-1]["approvals"][-1]["value"], "-2")
self.assertEqual(B.patchsets[-1]["approvals"][-1]["value"], "-2")
@@ -1074,6 +1113,7 @@ class TestGerritCircularDependencies(ZuulTestCase):
"""
- job:
name: project-vars-job
+ deduplicate: false
vars:
test_var: pass
@@ -1497,6 +1537,362 @@ class TestGerritCircularDependencies(ZuulTestCase):
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(C.data['status'], 'MERGED')
+ def _test_job_deduplication(self):
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+
+ # A <-> B
+ A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ A.subject, B.data["url"]
+ )
+ B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ B.subject, A.data["url"]
+ )
+
+ A.addApproval('Code-Review', 2)
+ B.addApproval('Code-Review', 2)
+
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'MERGED')
+ self.assertEqual(B.data['status'], 'MERGED')
+
+ @simple_layout('layouts/job-dedup-auto-shared.yaml')
+ def test_job_deduplication_auto_shared(self):
+ self._test_job_deduplication()
+ self.assertHistory([
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # This is deduplicated
+ # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 3)
+
+ @simple_layout('layouts/job-dedup-auto-unshared.yaml')
+ def test_job_deduplication_auto_unshared(self):
+ self._test_job_deduplication()
+ self.assertHistory([
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # This is not deduplicated
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 4)
+
+ @simple_layout('layouts/job-dedup-true.yaml')
+ def test_job_deduplication_true(self):
+ self._test_job_deduplication()
+ self.assertHistory([
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # This is deduplicated
+ # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 3)
+
+ @simple_layout('layouts/job-dedup-false.yaml')
+ def test_job_deduplication_false(self):
+ self._test_job_deduplication()
+ self.assertHistory([
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # This is not deduplicated, though it would be under auto
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 4)
+
+ @simple_layout('layouts/job-dedup-empty-nodeset.yaml')
+ def test_job_deduplication_empty_nodeset(self):
+ # Make sure that jobs with empty nodesets can still be
+ # deduplicated
+ self._test_job_deduplication()
+ self.assertHistory([
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # This is deduplicated
+ # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 0)
+
+ @simple_layout('layouts/job-dedup-auto-shared.yaml')
+ def test_job_deduplication_failed_node_request(self):
+ # Pause nodepool so we can fail the node request later
+ self.fake_nodepool.pause()
+
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+
+ # A <-> B
+ A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ A.subject, B.data["url"]
+ )
+ B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ B.subject, A.data["url"]
+ )
+
+ A.addApproval('Code-Review', 2)
+ B.addApproval('Code-Review', 2)
+
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+ self.waitUntilSettled()
+
+ # Fail the node request and unpause
+ for req in self.fake_nodepool.getNodeRequests():
+ if req['requestor_data']['job_name'] == 'common-job':
+ self.fake_nodepool.addFailRequest(req)
+
+ self.fake_nodepool.unpause()
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(B.data['status'], 'NEW')
+ self.assertHistory([])
+ self.assertEqual(len(self.fake_nodepool.history), 3)
+
+ @simple_layout('layouts/job-dedup-auto-shared.yaml')
+ def test_job_deduplication_failed_job(self):
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+
+ # A <-> B
+ A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ A.subject, B.data["url"]
+ )
+ B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ B.subject, A.data["url"]
+ )
+
+ A.addApproval('Code-Review', 2)
+ B.addApproval('Code-Review', 2)
+ self.executor_server.failJob("common-job", A)
+
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(B.data['status'], 'NEW')
+ self.assertHistory([
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="common-job", result="FAILURE", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # This is deduplicated
+ # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 3)
+
+ @simple_layout('layouts/job-dedup-retry.yaml')
+ def test_job_deduplication_retry(self):
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+
+ # A <-> B
+ A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ A.subject, B.data["url"]
+ )
+ B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ B.subject, A.data["url"]
+ )
+
+ self.executor_server.retryJob('common-job', A)
+
+ A.addApproval('Code-Review', 2)
+ B.addApproval('Code-Review', 2)
+
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'NEW')
+ self.assertEqual(B.data['status'], 'NEW')
+ self.assertHistory([
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # There should be exactly 3 runs of the job (not 6)
+ dict(name="common-job", result=None, changes="2,1 1,1"),
+ dict(name="common-job", result=None, changes="2,1 1,1"),
+ dict(name="common-job", result=None, changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 5)
+
+ @simple_layout('layouts/job-dedup-retry-child.yaml')
+ def test_job_deduplication_retry_child(self):
+ # This tests retrying a paused build (simulating an executor restart)
+ # See test_data_return_child_from_retried_paused_job
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+
+ # A <-> B
+ A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ A.subject, B.data["url"]
+ )
+ B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ B.subject, A.data["url"]
+ )
+
+ self.executor_server.returnData(
+ 'parent-job', A,
+ {'zuul': {'pause': True}}
+ )
+
+ A.addApproval('Code-Review', 2)
+ B.addApproval('Code-Review', 2)
+
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+ self.waitUntilSettled()
+
+ self.executor_server.release('parent-job')
+ self.waitUntilSettled("till job is paused")
+
+ paused_job = self.builds[0]
+ self.assertTrue(paused_job.paused)
+
+ # Stop the job worker to simulate an executor restart
+ for job_worker in self.executor_server.job_workers.values():
+ if job_worker.build_request.uuid == paused_job.uuid:
+ job_worker.stop()
+ self.waitUntilSettled("stop job worker")
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled("all jobs are done")
+ # The "pause" job might be paused during the waitUntilSettled
+ # call and appear settled; it should automatically resume
+ # though, so just wait for it.
+ for x in iterate_timeout(60, 'paused job'):
+ if not self.builds:
+ break
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'MERGED')
+ self.assertEqual(B.data['status'], 'MERGED')
+ self.assertHistory([
+ dict(name="parent-job", result="ABORTED", changes="2,1 1,1"),
+ dict(name="project1-job", result="ABORTED", changes="2,1 1,1"),
+ dict(name="project2-job", result="ABORTED", changes="2,1 1,1"),
+ dict(name="parent-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 6)
+
+ @simple_layout('layouts/job-dedup-parent-data.yaml')
+ def test_job_deduplication_parent_data(self):
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+
+ # A <-> B
+ A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ A.subject, B.data["url"]
+ )
+ B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ B.subject, A.data["url"]
+ )
+
+ # The parent job returns data
+ self.executor_server.returnData(
+ 'parent-job', A,
+ {'zuul':
+ {'artifacts': [
+ {'name': 'image',
+ 'url': 'http://example.com/image',
+ 'metadata': {
+ 'type': 'container_image'
+ }},
+ ]}}
+ )
+
+ A.addApproval('Code-Review', 2)
+ B.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'MERGED')
+ self.assertEqual(B.data['status'], 'MERGED')
+ self.assertHistory([
+ dict(name="parent-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # Only one run of the common job since it's the same
+ dict(name="common-child-job", result="SUCCESS", changes="2,1 1,1"),
+ # The forked job depends on different parents
+ # so it should run twice
+ dict(name="forked-child-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="forked-child-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 6)
+
+ def _test_job_deduplication_semaphore(self):
+ "Test semaphores with max=1 (mutex) and get resources first"
+ self.executor_server.hold_jobs_in_build = True
+
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ self.assertEqual(
+ len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
+ 0)
+
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+
+ # A <-> B
+ A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ A.subject, B.data["url"]
+ )
+ B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ B.subject, A.data["url"]
+ )
+
+ A.addApproval('Code-Review', 2)
+ B.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+ self.waitUntilSettled()
+ self.assertEqual(
+ len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
+ 1)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertHistory([
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # This is deduplicated
+ # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 3)
+ self.assertEqual(
+ len(tenant.semaphore_handler.semaphoreHolders("test-semaphore")),
+ 0)
+
+ @simple_layout('layouts/job-dedup-semaphore.yaml')
+ def test_job_deduplication_semaphore(self):
+ self._test_job_deduplication_semaphore()
+
+ @simple_layout('layouts/job-dedup-semaphore-first.yaml')
+ def test_job_deduplication_semaphore_resources_first(self):
+ self._test_job_deduplication_semaphore()
+
def test_submitted_together(self):
self.fake_gerrit._fake_submit_whole_topic = True
A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
@@ -1677,6 +2073,44 @@ class TestGerritCircularDependencies(ZuulTestCase):
self.assertEqual(A.data["status"], "MERGED")
self.assertEqual(B.data["status"], "MERGED")
+ @simple_layout('layouts/deps-by-topic.yaml')
+ def test_deps_by_topic_new_patchset(self):
+ # Make sure that we correctly update the change cache on new
+ # patchsets.
+ A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
+ topic='test-topic')
+ B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
+ topic='test-topic')
+
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
+ self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
+ self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
+
+ self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
+ self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
+ self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
+
+ self.assertHistory([
+ dict(name="test-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="test-job", result="SUCCESS", changes="1,1 2,1"),
+ ], ordered=False)
+
+ A.addPatchset()
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2))
+ self.waitUntilSettled()
+
+ self.assertHistory([
+ # Original check run
+ dict(name="test-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="test-job", result="SUCCESS", changes="1,1 2,1"),
+ # Second check run
+ dict(name="test-job", result="SUCCESS", changes="2,1 1,2"),
+ ], ordered=False)
+
class TestGithubCircularDependencies(ZuulTestCase):
config_file = "zuul-gerrit-github.conf"
diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py
index 87f271490..b51639952 100644
--- a/tests/unit/test_client.py
+++ b/tests/unit/test_client.py
@@ -1,4 +1,5 @@
# Copyright 2018 Red Hat, Inc.
+# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -17,18 +18,22 @@ import os
import sys
import subprocess
import time
-
import configparser
+import datetime
+import dateutil.tz
+
import fixtures
import jwt
import testtools
-from kazoo.exceptions import NoNodeError
from zuul.zk import ZooKeeperClient
+from zuul.cmd.client import parse_cutoff
from tests.base import BaseTestCase, ZuulTestCase
from tests.base import FIXTURE_DIR
+from kazoo.exceptions import NoNodeError
+
class BaseClientTestCase(BaseTestCase):
config_file = 'zuul.conf'
@@ -66,7 +71,7 @@ class TestTenantValidationClient(BaseClientTestCase):
with open(os.path.join(self.test_root, 'tenant_ok.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'tenant_ok.conf'),
'tenant-conf-check'], stdout=subprocess.PIPE)
p.communicate()
@@ -78,7 +83,7 @@ class TestTenantValidationClient(BaseClientTestCase):
with open(os.path.join(self.test_root, 'tenant_ko.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'tenant_ko.conf'),
'tenant-conf-check'], stdout=subprocess.PIPE)
out, _ = p.communicate()
@@ -100,7 +105,7 @@ class TestWebTokenClient(BaseClientTestCase):
'no_zuul_operator.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'no_zuul_operator.conf'),
'create-auth-token',
'--auth-config', 'zuul_operator',
@@ -122,7 +127,7 @@ class TestWebTokenClient(BaseClientTestCase):
with open(os.path.join(self.test_root, 'JWKS.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'JWKS.conf'),
'create-auth-token',
'--auth-config', 'someauth',
@@ -140,7 +145,7 @@ class TestWebTokenClient(BaseClientTestCase):
with open(os.path.join(self.test_root, 'good.conf'), 'w') as f:
self.config.write(f)
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', os.path.join(self.test_root, 'good.conf'),
'create-auth-token',
'--auth-conf', 'zuul_operator',
@@ -187,7 +192,7 @@ class TestKeyOperations(ZuulTestCase):
# Export keys
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'export-keys', export_root],
stdout=subprocess.PIPE)
@@ -204,7 +209,7 @@ class TestKeyOperations(ZuulTestCase):
# Import keys
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'import-keys', export_root],
stdout=subprocess.PIPE)
@@ -222,7 +227,7 @@ class TestKeyOperations(ZuulTestCase):
self.config.write(f)
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'copy-keys',
'gerrit', 'org/project',
@@ -242,7 +247,7 @@ class TestKeyOperations(ZuulTestCase):
data['/keystorage/gerrit/neworg/neworg%2Fnewproject/ssh'])
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-keys',
'gerrit', 'org/project',
@@ -269,7 +274,7 @@ class TestKeyOperations(ZuulTestCase):
data.get('/keystorage/gerrit/org'))
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-keys',
'gerrit', 'org/project1',
@@ -280,7 +285,7 @@ class TestKeyOperations(ZuulTestCase):
self.assertEqual(p.returncode, 0)
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-keys',
'gerrit', 'org/project2',
@@ -310,6 +315,9 @@ class TestOfflineZKOperations(ZuulTestCase):
def assertFinalState(self):
pass
+ def assertCleanShutdown(self):
+ pass
+
def test_delete_state(self):
# Shut everything down (as much as possible) to reduce
# logspam and errors.
@@ -328,7 +336,7 @@ class TestOfflineZKOperations(ZuulTestCase):
old_data = self.getZKTree('/keystorage')
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-state',
],
@@ -367,7 +375,7 @@ class TestOnlineZKOperations(ZuulTestCase):
# Make sure the pipeline exists
self.getZKTree('/zuul/tenant/tenant-one/pipeline/check/item')
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-pipeline-state',
'tenant-one', 'check',
@@ -405,7 +413,7 @@ class TestOnlineZKOperations(ZuulTestCase):
# Make sure the pipeline exists
self.getZKTree('/zuul/tenant/tenant-one/pipeline/gate/item')
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
'delete-pipeline-state',
'tenant-one', 'gate',
@@ -429,3 +437,118 @@ class TestOnlineZKOperations(ZuulTestCase):
dict(name='project-test1', result='SUCCESS', changes='2,1'),
dict(name='project-test2', result='SUCCESS', changes='2,1'),
], ordered=False)
+
+
+class TestDBPruneParse(BaseTestCase):
+ def test_db_prune_parse(self):
+ now = datetime.datetime(year=2023, month=5, day=28,
+ hour=22, minute=15, second=1,
+ tzinfo=dateutil.tz.tzutc())
+ reference = datetime.datetime(year=2022, month=5, day=28,
+ hour=22, minute=15, second=1,
+ tzinfo=dateutil.tz.tzutc())
+ # Test absolute times
+ self.assertEqual(
+ reference,
+ parse_cutoff(now, '2022-05-28 22:15:01 UTC', None))
+ self.assertEqual(
+ reference,
+ parse_cutoff(now, '2022-05-28 22:15:01', None))
+
+ # Test relative times
+ self.assertEqual(reference,
+ parse_cutoff(now, None, '8760h'))
+ self.assertEqual(reference,
+ parse_cutoff(now, None, '365d'))
+ with testtools.ExpectedException(RuntimeError):
+ self.assertEqual(reference,
+ parse_cutoff(now, None, '1y'))
+
+
+class DBPruneTestCase(ZuulTestCase):
+ tenant_config_file = 'config/single-tenant/main.yaml'
+
+ def _setup(self):
+ config_file = os.path.join(self.test_root, 'zuul.conf')
+ with open(config_file, 'w') as f:
+ self.config.write(f)
+
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ time.sleep(1)
+
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ connection = self.scheds.first.sched.sql.connection
+ buildsets = connection.getBuildsets()
+ builds = connection.getBuilds()
+ self.assertEqual(len(buildsets), 2)
+ self.assertEqual(len(builds), 6)
+ for build in builds:
+ self.log.debug("Build %s %s %s",
+ build, build.start_time, build.end_time)
+ return config_file
+
+ def test_db_prune_before(self):
+ # Test pruning buildsets before a specific date
+ config_file = self._setup()
+ connection = self.scheds.first.sched.sql.connection
+
+ # Builds are reverse ordered; 0 is most recent
+ buildsets = connection.getBuildsets()
+ start_time = buildsets[0].first_build_start_time
+ self.log.debug("Cutoff %s", start_time)
+
+ p = subprocess.Popen(
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
+ '-c', config_file,
+ 'prune-database',
+ '--before', str(start_time),
+ ],
+ stdout=subprocess.PIPE)
+ out, _ = p.communicate()
+ self.log.debug(out.decode('utf8'))
+
+ buildsets = connection.getBuildsets()
+ builds = connection.getBuilds()
+ self.assertEqual(len(buildsets), 1)
+ self.assertEqual(len(builds), 3)
+ for build in builds:
+ self.log.debug("Build %s %s %s",
+ build, build.start_time, build.end_time)
+
+ def test_db_prune_older_than(self):
+ # Test pruning buildsets older than a relative time
+ config_file = self._setup()
+ connection = self.scheds.first.sched.sql.connection
+
+ # We use 0d as the relative time here since the earliest we
+ # support is 1d and that's tricky in unit tests. The
+ # prune_before test handles verifying that we don't just
+ # always delete everything.
+ p = subprocess.Popen(
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
+ '-c', config_file,
+ 'prune-database',
+ '--older-than', '0d',
+ ],
+ stdout=subprocess.PIPE)
+ out, _ = p.communicate()
+ self.log.debug(out.decode('utf8'))
+
+ buildsets = connection.getBuildsets()
+ builds = connection.getBuilds()
+ self.assertEqual(len(buildsets), 0)
+ self.assertEqual(len(builds), 0)
+
+
+class TestDBPruneMysql(DBPruneTestCase):
+ config_file = 'zuul-sql-driver-mysql.conf'
+
+
+class TestDBPrunePostgres(DBPruneTestCase):
+ config_file = 'zuul-sql-driver-postgres.conf'
diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py
index 153130d77..571df7550 100644
--- a/tests/unit/test_configloader.py
+++ b/tests/unit/test_configloader.py
@@ -211,6 +211,31 @@ class TestTenantSimple(TenantParserTestCase):
r'review.example.com/org/project2 @master.*',
update_logs.output)
+ def test_cache_new_branch(self):
+ first = self.scheds.first
+ lock1 = first.sched.layout_update_lock
+ lock2_ = first.sched.run_handler_lock
+ with lock1, lock2_:
+ self.create_branch('org/project1', 'stable')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project1', 'stable'))
+
+ second = self.createScheduler()
+ second.start()
+ self.assertEqual(len(self.scheds), 2)
+ for _ in iterate_timeout(10, "until priming is complete"):
+ state_one = first.sched.local_layout_state.get("tenant-one")
+ if state_one:
+ break
+
+ for _ in iterate_timeout(
+ 10, "all schedulers to have the same layout state"):
+ if (second.sched.local_layout_state.get(
+ "tenant-one") == state_one):
+ break
+ self.waitUntilSettled()
+
def test_variant_description(self):
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
job = tenant.layout.jobs.get("project2-job")
@@ -534,6 +559,58 @@ class TestTenantUnprotectedBranches(TenantParserTestCase):
self.assertIsNone(tpc[project_name].exclude_unprotected_branches)
+class TestTenantIncludeBranches(TenantParserTestCase):
+ tenant_config_file = 'config/tenant-parser/include-branches.yaml'
+
+ def test_tenant_branches(self):
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
+
+ self.assertEqual(['common-config'],
+ [x.name for x in tenant.config_projects])
+ self.assertEqual(['org/project1', 'org/project2'],
+ [x.name for x in tenant.untrusted_projects])
+
+ tpc = tenant.project_configs
+ project_name = tenant.config_projects[0].canonical_name
+ self.assertEqual(['master'], tpc[project_name].branches)
+
+ # No branches pass the filter at the start
+ project_name = tenant.untrusted_projects[0].canonical_name
+ self.assertEqual([], tpc[project_name].branches)
+
+ # Create the foo branch
+ self.create_branch('org/project1', 'foo')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project1', 'foo'))
+ self.waitUntilSettled()
+
+ # It should pass the filter
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ tpc = tenant.project_configs
+ project_name = tenant.untrusted_projects[0].canonical_name
+ self.assertEqual(['foo'], tpc[project_name].branches)
+
+ # Create the baz branch
+ self.create_branch('org/project1', 'baz')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project1', 'baz'))
+ self.waitUntilSettled()
+
+ # It should not pass the filter
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ tpc = tenant.project_configs
+ project_name = tenant.untrusted_projects[0].canonical_name
+ self.assertEqual(['foo'], tpc[project_name].branches)
+
+
+class TestTenantExcludeBranches(TestTenantIncludeBranches):
+ tenant_config_file = 'config/tenant-parser/exclude-branches.yaml'
+
+ # Same test results as include-branches
+
+
class TestTenantExcludeAll(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/exclude-all.yaml'
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index 18db91789..ab781252e 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -73,7 +73,7 @@ class TestSQLConnectionMysql(ZuulTestCase):
buildset_table = table_prefix + 'zuul_buildset'
build_table = table_prefix + 'zuul_build'
- self.assertEqual(19, len(insp.get_columns(buildset_table)))
+ self.assertEqual(20, len(insp.get_columns(buildset_table)))
self.assertEqual(13, len(insp.get_columns(build_table)))
def test_sql_tables_created(self):
diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py
index 60f7eea37..3453f9f72 100644
--- a/tests/unit/test_database.py
+++ b/tests/unit/test_database.py
@@ -106,6 +106,7 @@ class TestMysqlDatabase(DBBaseTestCase):
connection.exec_driver_sql(f"drop table {table}")
connection.exec_driver_sql("set foreign_key_checks=1")
+ self.connection.force_migrations = True
self.connection._migrate('c57e9e76b812')
with self.connection.engine.begin() as connection:
connection.exec_driver_sql(
@@ -122,6 +123,49 @@ class TestMysqlDatabase(DBBaseTestCase):
"select result from zuul_buildset")]
self.assertEqual(results, ['SUCCESS', 'MERGE_CONFLICT'])
+ def test_migration_c7467b642498(self):
+ with self.connection.engine.begin() as connection:
+ connection.exec_driver_sql("set foreign_key_checks=0")
+ for table in connection.exec_driver_sql("show tables"):
+ table = table[0]
+ connection.exec_driver_sql(f"drop table {table}")
+ connection.exec_driver_sql("set foreign_key_checks=1")
+
+ self.connection.force_migrations = True
+ self.connection._migrate('4647def24b32')
+ with self.connection.engine.begin() as connection:
+ connection.exec_driver_sql(
+ "insert into zuul_buildset (result) values ('SUCCESS')")
+ connection.exec_driver_sql(
+ "insert into zuul_buildset (result, first_build_start_time) "
+ "values ('SUCCESS', '2022-05-01 12:34:56')")
+ connection.exec_driver_sql(
+ "insert into zuul_buildset (result, last_build_end_time) "
+ "values ('SUCCESS', '2022-05-02 12:34:56')")
+ connection.exec_driver_sql(
+ "insert into zuul_buildset (result, event_timestamp) "
+ "values ('SUCCESS', '2022-05-03 12:34:56')")
+ connection.exec_driver_sql(
+ "insert into zuul_buildset (result, "
+ "first_build_start_time, "
+ "last_build_end_time, "
+ "event_timestamp)"
+ "values ('SUCCESS', "
+ "'2022-05-11 12:34:56', "
+ "'2022-05-12 12:34:56', "
+ "'2022-05-13 12:34:56')")
+
+ self.connection._migrate()
+ with self.connection.engine.begin() as connection:
+ results = [str(r[0]) for r in connection.exec_driver_sql(
+ "select updated from zuul_buildset")]
+ self.assertEqual(results,
+ ['1970-01-01 00:00:00',
+ '2022-05-01 12:34:56',
+ '2022-05-02 12:34:56',
+ '2022-05-03 12:34:56',
+ '2022-05-13 12:34:56'])
+
def test_buildsets(self):
tenant = 'tenant1',
buildset_uuid = 'deadbeef'
diff --git a/tests/unit/test_gerrit.py b/tests/unit/test_gerrit.py
index bcb8f7bc8..f0f9027bd 100644
--- a/tests/unit/test_gerrit.py
+++ b/tests/unit/test_gerrit.py
@@ -148,6 +148,25 @@ class TestGerrit(BaseTestCase):
'ssh://gerrit@localhost:29418/org/project',
url)
+ def test_ssh_server_getGitURL(self):
+ gerrit_config = {
+ 'user': 'gerrit',
+ 'server': 'otherserver',
+ 'password': '1/badpassword',
+ 'ssh_server': 'localhost',
+ 'git_over_ssh': 'true',
+ }
+ # The 1/ in the password ensures we test the url encoding
+ # path; this is the format of password we get from
+ # googlesource.com.
+ driver = GerritDriver()
+ gerrit = GerritConnection(driver, 'review_gerrit', gerrit_config)
+ project = gerrit.source.getProject('org/project')
+ url = gerrit.source.getGitUrl(project)
+ self.assertEqual(
+ 'ssh://gerrit@localhost:29418/org/project',
+ url)
+
class TestGerritWeb(ZuulTestCase):
config_file = 'zuul-gerrit-web.conf'
@@ -767,3 +786,84 @@ class TestWrongConnection(ZuulTestCase):
dict(name='test-job', result='SUCCESS', changes='1,1'),
dict(name='test-job', result='SUCCESS', changes='2,1'),
], ordered=False)
+
+
+class TestGerritFake(ZuulTestCase):
+ config_file = "zuul-gerrit-github.conf"
+ tenant_config_file = "config/circular-dependencies/main.yaml"
+
+ def _get_tuple(self, change_number):
+ ret = []
+ data = self.fake_gerrit.get(
+ f'changes/{change_number}/submitted_together')
+ for c in data:
+ dep_change = c['_number']
+ dep_ps = c['revisions'][c['current_revision']]['_number']
+ ret.append((dep_change, dep_ps))
+ return sorted(ret)
+
+ def test_submitted_together_normal(self):
+ # Test that the fake submitted together endpoint returns
+ # expected data
+
+ # This test verifies behavior with submitWholeTopic=False
+
+ # A single change
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ data = self._get_tuple(1)
+ self.assertEqual(data, [])
+ ret = self.fake_gerrit._getSubmittedTogether(A, None)
+ self.assertEqual(ret, [])
+
+ # A dependent series (B->A)
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ B.setDependsOn(A, 1)
+ data = self._get_tuple(2)
+ self.assertEqual(data, [(1, 1), (2, 1)])
+ # The Gerrit connection method filters out the queried change
+ ret = self.fake_gerrit._getSubmittedTogether(B, None)
+ self.assertEqual(ret, [(1, 1)])
+
+ # A topic cycle
+ C1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'C1',
+ topic='test-topic')
+ self.fake_gerrit.addFakeChange('org/project', 'master', 'C2',
+ topic='test-topic')
+ data = self._get_tuple(3)
+ self.assertEqual(data, [])
+ ret = self.fake_gerrit._getSubmittedTogether(C1, None)
+ self.assertEqual(ret, [])
+
+ def test_submitted_together_whole_topic(self):
+ # Test that the fake submitted together endpoint returns
+ # expected data
+
+ # This test verifies behavior with submitWholeTopic=True
+ self.fake_gerrit._fake_submit_whole_topic = True
+
+ # A single change
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ data = self._get_tuple(1)
+ self.assertEqual(data, [])
+ ret = self.fake_gerrit._getSubmittedTogether(A, None)
+ self.assertEqual(ret, [])
+
+ # A dependent series (B->A)
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ B.setDependsOn(A, 1)
+ data = self._get_tuple(2)
+ self.assertEqual(data, [(1, 1), (2, 1)])
+ # The Gerrit connection method filters out the queried change
+ ret = self.fake_gerrit._getSubmittedTogether(B, None)
+ self.assertEqual(ret, [(1, 1)])
+
+ # A topic cycle
+ C1 = self.fake_gerrit.addFakeChange('org/project', 'master', 'C1',
+ topic='test-topic')
+ self.fake_gerrit.addFakeChange('org/project', 'master', 'C2',
+ topic='test-topic')
+ data = self._get_tuple(3)
+ self.assertEqual(data, [(3, 1), (4, 1)])
+ # The Gerrit connection method filters out the queried change
+ ret = self.fake_gerrit._getSubmittedTogether(C1, None)
+ self.assertEqual(ret, [(4, 1)])
diff --git a/tests/unit/test_github_driver.py b/tests/unit/test_github_driver.py
index 93894be9d..1bfba36bb 100644
--- a/tests/unit/test_github_driver.py
+++ b/tests/unit/test_github_driver.py
@@ -23,12 +23,14 @@ import textwrap
from unittest import mock, skip
import git
+import gitdb
import github3.exceptions
from tests.fakegithub import FakeFile, FakeGithubEnterpriseClient
from zuul.driver.github.githubconnection import GithubShaCache
from zuul.zk.layout import LayoutState
from zuul.lib import strings
+from zuul.merger.merger import Repo
from zuul.model import MergeRequest, EnqueueEvent, DequeueEvent
from tests.base import (AnsibleZuulTestCase, BaseTestCase,
@@ -1200,7 +1202,7 @@ class TestGithubDriver(ZuulTestCase):
self.assertEqual(1, len(A.comments))
self.assertFalse(A.is_merged)
self.assertIn('This change is unable to merge '
- 'due to a missing requirement.',
+ 'due to a missing merge requirement.',
A.comments[0])
# This test case verifies that no reconfiguration happens if a branch was
@@ -2407,7 +2409,7 @@ class TestCheckRunAnnotations(ZuulGithubAppTestCase, AnsibleZuulTestCase):
})
-class TestGithubDriverEnterise(ZuulGithubAppTestCase):
+class TestGithubDriverEnterprise(ZuulGithubAppTestCase):
config_file = 'zuul-github-driver-enterprise.conf'
scheduler_count = 1
@@ -2445,7 +2447,7 @@ class TestGithubDriverEnterise(ZuulGithubAppTestCase):
self.assertEqual(len(A.comments), 0)
-class TestGithubDriverEnteriseLegacy(ZuulGithubAppTestCase):
+class TestGithubDriverEnterpriseLegacy(ZuulGithubAppTestCase):
config_file = 'zuul-github-driver-enterprise.conf'
scheduler_count = 1
@@ -2485,3 +2487,103 @@ class TestGithubDriverEnteriseLegacy(ZuulGithubAppTestCase):
r'.*I shouldnt be seen.*',
re.DOTALL)))
self.assertEqual(len(A.comments), 0)
+
+
+class TestGithubDriverEnterpriseCache(ZuulGithubAppTestCase):
+ config_file = 'zuul-github-driver-enterprise.conf'
+ scheduler_count = 1
+
+ def setup_config(self, config_file):
+ self.upstream_cache_root = self.upstream_root + '-cache'
+ config = super().setup_config(config_file)
+ # This adds the GHE repository cache feature
+ config.set('connection github', 'repo_cache', self.upstream_cache_root)
+ config.set('connection github', 'repo_retry_timeout', '30')
+ # Synchronize the upstream repos to the upstream repo cache
+ self.synchronize_repo('org/common-config')
+ self.synchronize_repo('org/project')
+ return config
+
+ def init_repo(self, project, tag=None):
+ super().init_repo(project, tag)
+ # After creating the upstream repo, also create the empty
+ # cache repo (but unsynchronized for now)
+ parts = project.split('/')
+ path = os.path.join(self.upstream_cache_root, *parts[:-1])
+ if not os.path.exists(path):
+ os.makedirs(path)
+ path = os.path.join(self.upstream_cache_root, project)
+ repo = git.Repo.init(path)
+
+ with repo.config_writer() as config_writer:
+ config_writer.set_value('user', 'email', 'user@example.com')
+ config_writer.set_value('user', 'name', 'User Name')
+
+ def synchronize_repo(self, project):
+ # Synchronize the upstream repo to the cache
+ upstream_path = os.path.join(self.upstream_root, project)
+ upstream = git.Repo(upstream_path)
+
+ cache_path = os.path.join(self.upstream_cache_root, project)
+ cache = git.Repo(cache_path)
+
+ refs = upstream.git.for_each_ref(
+ '--format=%(objectname) %(refname)'
+ )
+ for ref in refs.splitlines():
+ parts = ref.split(" ")
+ if len(parts) == 2:
+ commit, ref = parts
+ else:
+ continue
+
+ self.log.debug("Synchronize ref %s: %s", ref, commit)
+ cache.git.fetch(upstream_path, ref)
+ binsha = gitdb.util.to_bin_sha(commit)
+ obj = git.objects.Object.new_from_sha(cache, binsha)
+ git.refs.Reference.create(cache, ref, obj, force=True)
+
+ @simple_layout('layouts/merging-github.yaml', driver='github')
+ def test_github_repo_cache(self):
+ # Test that we fetch and configure retries correctly when
+ # using a github enterprise repo cache (the cache can be
+ # slightly out of sync).
+ github = self.fake_github.getGithubClient()
+ repo = github.repo_from_project('org/project')
+ repo._set_branch_protection('master', require_review=True)
+
+ # Make sure we have correctly overridden the retry attempts
+ merger = self.executor_server.merger
+ repo = merger.getRepo('github', 'org/project')
+ self.assertEqual(repo.retry_attempts, 1)
+
+ # Our initial attempt should fail; make it happen quickly
+ self.patch(Repo, 'retry_interval', 1)
+
+ # pipeline merges the pull request on success
+ A = self.fake_github.openFakePullRequest('org/project', 'master',
+ 'PR title',
+ body='I shouldnt be seen',
+ body_text='PR body')
+
+ A.addReview('user', 'APPROVED')
+ self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
+ self.waitUntilSettled('initial failed attempt')
+
+ self.assertFalse(A.is_merged)
+
+ # Now synchronize the upstream repo to the cache and try again
+ self.synchronize_repo('org/project')
+
+ self.fake_github.emitEvent(A.getCommentAddedEvent('merge me'))
+ self.waitUntilSettled('second successful attempt')
+
+ self.assertTrue(A.is_merged)
+
+ self.assertThat(A.merge_message,
+ MatchesRegex(r'.*PR title\n\nPR body.*', re.DOTALL))
+ self.assertThat(A.merge_message,
+ Not(MatchesRegex(
+ r'.*I shouldnt be seen.*',
+ re.DOTALL)))
+ self.assertEqual(len(A.comments), 0)
diff --git a/tests/unit/test_gitlab_driver.py b/tests/unit/test_gitlab_driver.py
index 09184be13..2715cdef1 100644
--- a/tests/unit/test_gitlab_driver.py
+++ b/tests/unit/test_gitlab_driver.py
@@ -665,6 +665,22 @@ class TestGitlabDriver(ZuulTestCase):
self.getJobFromHistory('project-test').result)
self.assertEqual('merged', A.state)
+ @simple_layout('layouts/merging-gitlab-squash-merge.yaml', driver='gitlab')
+ def test_merge_squash(self):
+
+ A = self.fake_gitlab.openFakeMergeRequest(
+ 'org/project1', 'master', 'A')
+
+ self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
+ self.waitUntilSettled()
+ # canMerge is validated
+ self.assertEqual(1, len(self.history))
+
+ self.assertEqual('SUCCESS',
+ self.getJobFromHistory('project-test').result)
+ self.assertEqual('merged', A.state)
+ self.assertTrue(A.squash_merge)
+
@simple_layout('layouts/crd-gitlab.yaml', driver='gitlab')
def test_crd_dependent(self):
diff --git a/tests/unit/test_global_semaphores.py b/tests/unit/test_global_semaphores.py
new file mode 100644
index 000000000..b76f9d812
--- /dev/null
+++ b/tests/unit/test_global_semaphores.py
@@ -0,0 +1,169 @@
+# Copyright 2022 Acme Gating, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import zuul.configloader
+
+from tests.base import ZuulTestCase
+
+
+class TestGlobalSemaphoresConfig(ZuulTestCase):
+ tenant_config_file = 'config/global-semaphores-config/main.yaml'
+
+ def assertSemaphores(self, tenant, semaphores):
+ for k, v in semaphores.items():
+ self.assertEqual(
+ len(tenant.semaphore_handler.semaphoreHolders(k)),
+ v, k)
+
+ def assertSemaphoresMax(self, tenant, semaphores):
+ for k, v in semaphores.items():
+ abide = tenant.semaphore_handler.abide
+ semaphore = tenant.layout.getSemaphore(abide, k)
+ self.assertEqual(semaphore.max, v, k)
+
+ def test_semaphore_scope(self):
+ # This tests global and tenant semaphore scope
+ self.executor_server.hold_jobs_in_build = True
+ tenant1 = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ tenant2 = self.scheds.first.sched.abide.tenants.get('tenant-two')
+ tenant3 = self.scheds.first.sched.abide.tenants.get('tenant-three')
+
+ # The different max values will tell us that we have the right
+ # semaphore objects. Each tenant has one tenant-scope
+ # semaphore in a tenant-specific project, and one tenant-scope
+ # semaphore with a common definition. Tenants 1 and 2 share a
+ # global-scope semaphore, and tenant 3 has a tenant-scope
+ # semaphore with the same name.
+
+ # Here is what is defined in each tenant:
+ # Tenant-one:
+ # * global-semaphore: scope:global max:100 definition:main.yaml
+ # * common-semaphore: scope:tenant max:10 definition:common-config
+ # * project1-semaphore: scope:tenant max:11 definition:project1
+ # * (global-semaphore): scope:tenant max:2 definition:project1
+ # [unused since it shadows the actual global-semaphore]
+ # Tenant-two:
+ # * global-semaphore: scope:global max:100 definition:main.yaml
+ # * common-semaphore: scope:tenant max:10 definition:common-config
+ # * project2-semaphore: scope:tenant max:12 definition:project2
+ # Tenant-three:
+ # * global-semaphore: scope:global max:999 definition:project3
+ # * common-semaphore: scope:tenant max:10 definition:common-config
+ # * project3-semaphore: scope:tenant max:13 definition:project3
+ self.assertSemaphoresMax(tenant1, {'global-semaphore': 100,
+ 'common-semaphore': 10,
+ 'project1-semaphore': 11,
+ 'project2-semaphore': 1,
+ 'project3-semaphore': 1})
+ self.assertSemaphoresMax(tenant2, {'global-semaphore': 100,
+ 'common-semaphore': 10,
+ 'project1-semaphore': 1,
+ 'project2-semaphore': 12,
+ 'project3-semaphore': 1})
+ # This "global" semaphore is really tenant-scoped, it just has
+ # the same name.
+ self.assertSemaphoresMax(tenant3, {'global-semaphore': 999,
+ 'common-semaphore': 10,
+ 'project1-semaphore': 1,
+ 'project2-semaphore': 1,
+ 'project3-semaphore': 13})
+
+ # We should have a config error in tenant1 due to the
+ # redefinition.
+ self.assertEquals(len(tenant1.layout.loading_errors), 1)
+ self.assertEquals(len(tenant2.layout.loading_errors), 0)
+ self.assertEquals(len(tenant3.layout.loading_errors), 0)
+
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+ C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ # Checking the number of holders tells us whether we are
+ # using global or tenant-scoped semaphores. Each in-use
+ # semaphore in a tenant should have only one holder except the
+ # global-scope semaphore shared between tenants 1 and 2.
+ self.assertSemaphores(tenant1, {'global-semaphore': 2,
+ 'common-semaphore': 1,
+ 'project1-semaphore': 1,
+ 'project2-semaphore': 0,
+ 'project3-semaphore': 0})
+ self.assertSemaphores(tenant2, {'global-semaphore': 2,
+ 'common-semaphore': 1,
+ 'project1-semaphore': 0,
+ 'project2-semaphore': 1,
+ 'project3-semaphore': 0})
+ self.assertSemaphores(tenant3, {'global-semaphore': 1,
+ 'common-semaphore': 1,
+ 'project1-semaphore': 0,
+ 'project2-semaphore': 0,
+ 'project3-semaphore': 1})
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+
+class TestGlobalSemaphoresBroken(ZuulTestCase):
+ validate_tenants = []
+ tenant_config_file = 'config/global-semaphores-config/broken.yaml'
+ # This test raises a config error during the startup of the test
+ # case which makes the first scheduler fail during its startup.
+ # The second (or any additional) scheduler won't even run as the
+ # startup is serialized in tests/base.py.
+ # Thus it doesn't make sense to execute this test with multiple
+ # schedulers.
+ scheduler_count = 1
+
+ def setUp(self):
+ self.assertRaises(zuul.configloader.GlobalSemaphoreNotFoundError,
+ super().setUp)
+
+ def test_broken_global_semaphore_config(self):
+ pass
+
+
+class TestGlobalSemaphores(ZuulTestCase):
+ tenant_config_file = 'config/global-semaphores/main.yaml'
+
+ def test_global_semaphores(self):
+ # This tests that a job finishing in one tenant will correctly
+ # start a job in another tenant waiting on the semaphore.
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertHistory([])
+ self.assertBuilds([
+ dict(name='test-global-semaphore', changes='1,1'),
+ ])
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertHistory([
+ dict(name='test-global-semaphore',
+ result='SUCCESS', changes='1,1'),
+ dict(name='test-global-semaphore',
+ result='SUCCESS', changes='2,1'),
+ ], ordered=False)
diff --git a/tests/unit/test_model_upgrade.py b/tests/unit/test_model_upgrade.py
index f4a18bdfc..2004b317b 100644
--- a/tests/unit/test_model_upgrade.py
+++ b/tests/unit/test_model_upgrade.py
@@ -360,3 +360,45 @@ class TestGithubModelUpgrade(ZuulTestCase):
dict(name='project-test2', result='SUCCESS'),
], ordered=False)
self.assertTrue(A.is_merged)
+
+
+class TestDeduplication(ZuulTestCase):
+ config_file = "zuul-gerrit-github.conf"
+ tenant_config_file = "config/circular-dependencies/main.yaml"
+ scheduler_count = 1
+
+ def _test_job_deduplication(self):
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+
+ # A <-> B
+ A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ A.subject, B.data["url"]
+ )
+ B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+ B.subject, A.data["url"]
+ )
+
+ A.addApproval('Code-Review', 2)
+ B.addApproval('Code-Review', 2)
+
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'MERGED')
+ self.assertEqual(B.data['status'], 'MERGED')
+
+ @simple_layout('layouts/job-dedup-auto-shared.yaml')
+ @model_version(7)
+ def test_job_deduplication_auto_shared(self):
+ self._test_job_deduplication()
+ self.assertHistory([
+ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"),
+ # This would be deduplicated
+ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+ ], ordered=False)
+ self.assertEqual(len(self.fake_nodepool.history), 4)
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index 6ab5b3012..c6865a8d7 100644
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -26,6 +26,7 @@ from collections import namedtuple
from unittest import mock, skip
from uuid import uuid4
from kazoo.exceptions import NoNodeError
+from testtools.matchers import StartsWith
import git
import fixtures
@@ -365,6 +366,10 @@ class TestScheduler(ZuulTestCase):
'label1')
self.assertEqual(self.getJobFromHistory('project-test2').node,
'label1')
+ self.assertThat(A.messages[1],
+ StartsWith(
+ 'Build succeeded (gate).\n'
+ 'https://zuul.example.com/t/tenant-one/buildset'))
# TODOv3(jeblair): we may want to report stats by tenant (also?).
# Per-driver
@@ -1921,7 +1926,7 @@ class TestScheduler(ZuulTestCase):
self.assertEqual(2, len(A.messages))
self.assertEqual(A.data['status'], 'NEW')
self.assertIn('This change is unable to merge '
- 'due to a missing requirement.',
+ 'due to a missing merge requirement.',
A.messages[1])
@simple_layout('layouts/nonvoting-job-approval.yaml')
@@ -7062,6 +7067,51 @@ class TestSchedulerMerges(ZuulTestCase):
result = self._test_project_merge_mode('cherry-pick')
self.assertEqual(result, expected_messages)
+ def test_project_merge_mode_cherrypick_branch_merge(self):
+ "Test that branches can be merged together in cherry-pick mode"
+ self.create_branch('org/project-merge-branches', 'mp')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project-merge-branches', 'mp'))
+ self.waitUntilSettled()
+
+ path = os.path.join(self.upstream_root, 'org/project-merge-branches')
+ repo = git.Repo(path)
+ master_sha = repo.heads.master.commit.hexsha
+ mp_sha = repo.heads.mp.commit.hexsha
+
+ self.executor_server.hold_jobs_in_build = True
+ M = self.fake_gerrit.addFakeChange(
+ 'org/project-merge-branches', 'master', 'M',
+ merge_parents=[
+ master_sha,
+ mp_sha,
+ ])
+ M.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(M.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.executor_server.release('.*-merge')
+ self.waitUntilSettled()
+
+ build = self.builds[-1]
+ self.assertEqual(build.parameters['zuul']['branch'], 'master')
+ path = os.path.join(build.jobdir.src_root, 'review.example.com',
+ "org/project-merge-branches")
+ repo = git.Repo(path)
+ repo_messages = [c.message.strip() for c in repo.iter_commits()]
+ repo_messages.reverse()
+ correct_messages = [
+ 'initial commit',
+ 'add content from fixture',
+ 'mp commit',
+ 'M-1']
+ self.assertEqual(repo_messages, correct_messages)
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
def test_merge_branch(self):
"Test that the right commits are on alternate branches"
self.create_branch('org/project-merge-branches', 'mp')
@@ -8128,8 +8178,8 @@ class TestSemaphoreInRepo(ZuulTestCase):
item_dynamic_layout = pipeline.manager._layout_cache.get(
queue_item.layout_uuid)
self.assertIsNotNone(item_dynamic_layout)
- dynamic_test_semaphore = \
- item_dynamic_layout.semaphores.get('test-semaphore')
+ dynamic_test_semaphore = item_dynamic_layout.getSemaphore(
+ self.scheds.first.sched.abide, 'test-semaphore')
self.assertEqual(dynamic_test_semaphore.max, 1)
# one build must be in queue, one semaphores acquired
@@ -8152,7 +8202,8 @@ class TestSemaphoreInRepo(ZuulTestCase):
# now that change A was merged, the new semaphore max must be effective
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
- self.assertEqual(tenant.layout.semaphores.get('test-semaphore').max, 2)
+ self.assertEqual(tenant.layout.getSemaphore(
+ self.scheds.first.sched.abide, 'test-semaphore').max, 2)
# two builds must be in queue, two semaphores acquired
self.assertEqual(len(self.builds), 2)
@@ -8748,3 +8799,20 @@ class TestEventProcessing(ZuulTestCase):
dict(name='tagjob', result='SUCCESS'),
dict(name='checkjob', result='SUCCESS', changes='1,1'),
], ordered=False)
+
+
+class TestWaitForInit(ZuulTestCase):
+ tenant_config_file = 'config/single-tenant/main.yaml'
+ wait_for_init = True
+
+ def setUp(self):
+ with self.assertLogs('zuul.Scheduler-0', level='DEBUG') as full_logs:
+ super().setUp()
+ self.assertRegexInList('Waiting for tenant initialization',
+ full_logs.output)
+
+ def test_wait_for_init(self):
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 37a9c092f..ac10eda47 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -1479,6 +1479,38 @@ class TestInRepoConfig(ZuulTestCase):
"A should have failed the check pipeline")
self.assertHistory([])
+ def test_dynamic_nonexistent_job_dependency(self):
+ # Tests that a reference to a nonexistent job dependency is an
+ # error.
+ in_repo_conf = textwrap.dedent(
+ """
+ - job:
+ name: project-test1
+ run: playbooks/project-test1.yaml
+
+ - project:
+ name: org/project
+ check:
+ jobs:
+ - project-test1:
+ dependencies:
+ - name: non-existent-job
+ soft: true
+ """)
+
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+ self.assertEqual(A.reported, 1,
+ "A should report failure")
+ self.assertEqual(A.patchsets[0]['approvals'][0]['value'], "-1")
+ self.assertIn('Job non-existent-job not defined', A.messages[0],
+ "A should have failed the check pipeline")
+ self.assertNotIn('freezing', A.messages[0])
+ self.assertHistory([])
+
def test_dynamic_config_new_patchset(self):
self.executor_server.hold_jobs_in_build = True
@@ -8048,3 +8080,124 @@ class TestConnectionVars(AnsibleZuulTestCase):
# job_output = self._get_file(job, 'work/logs/job-output.txt')
# self.log.debug(job_output)
# self.assertNotIn("/bin/du", job_output)
+
+
+class IncludeBranchesTestCase(ZuulTestCase):
+ def _test_include_branches(self, history1, history2, history3, history4):
+ self.create_branch('org/project', 'stable')
+ self.create_branch('org/project', 'feature/foo')
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project', 'stable'))
+ self.fake_gerrit.addEvent(
+ self.fake_gerrit.getFakeBranchCreatedEvent(
+ 'org/project', 'feature/foo'))
+ self.waitUntilSettled()
+
+ # Test the jobs on the master branch.
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertHistory(history1, ordered=False)
+
+ # Test the jobs on the excluded feature branch.
+ B = self.fake_gerrit.addFakeChange('org/project', 'feature/foo', 'A')
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertHistory(history1 + history2, ordered=False)
+
+ # Test in-repo config proposed on the excluded feature branch.
+ conf = textwrap.dedent(
+ """
+ - job:
+ name: project-dynamic
+
+ - project:
+ check:
+ jobs:
+ - project-dynamic
+ """)
+ file_dict = {'zuul.yaml': conf}
+ C = self.fake_gerrit.addFakeChange('org/project', 'feature/foo', 'A',
+ files=file_dict)
+ self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertHistory(history1 + history2 + history3, ordered=False)
+
+ old = self.scheds.first.sched.tenant_layout_state.get('tenant-one')
+ # Merge a change to the excluded feature branch.
+ B.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+ self.waitUntilSettled()
+ self.assertEqual(B.data['status'], 'MERGED')
+ self.assertHistory(history1 + history2 + history3 + history4,
+ ordered=False)
+ new = self.scheds.first.sched.tenant_layout_state.get('tenant-one')
+ # Verify we haven't performed a tenant reconfiguration
+ self.assertTrue(old == new)
+
+
+class TestIncludeBranchesProject(IncludeBranchesTestCase):
+ tenant_config_file = 'config/dynamic-only-project/include.yaml'
+
+ def test_include_branches(self):
+ history1 = [
+ dict(name='central-test', result='SUCCESS', changes='1,1'),
+ dict(name='project-test', result='SUCCESS', changes='1,1'),
+ ]
+ history2 = [
+ dict(name='central-test', result='SUCCESS', changes='2,1'),
+ ]
+ history3 = [
+ dict(name='central-test', result='SUCCESS', changes='3,1'),
+ ]
+ history4 = [
+ dict(name='central-test', result='SUCCESS', changes='2,1'),
+ ]
+ self._test_include_branches(history1, history2, history3, history4)
+
+
+class TestExcludeBranchesProject(IncludeBranchesTestCase):
+ tenant_config_file = 'config/dynamic-only-project/exclude.yaml'
+
+ def test_exclude_branches(self):
+ history1 = [
+ dict(name='central-test', result='SUCCESS', changes='1,1'),
+ dict(name='project-test', result='SUCCESS', changes='1,1'),
+ ]
+ history2 = [
+ dict(name='central-test', result='SUCCESS', changes='2,1'),
+ ]
+ history3 = [
+ dict(name='central-test', result='SUCCESS', changes='3,1'),
+ ]
+ history4 = [
+ dict(name='central-test', result='SUCCESS', changes='2,1'),
+ ]
+ self._test_include_branches(history1, history2, history3, history4)
+
+
+class TestDynamicBranchesProject(IncludeBranchesTestCase):
+ tenant_config_file = 'config/dynamic-only-project/dynamic.yaml'
+
+ def test_dynamic_branches(self):
+ history1 = [
+ dict(name='central-test', result='SUCCESS', changes='1,1'),
+ dict(name='project-test', result='SUCCESS', changes='1,1'),
+ ]
+ history2 = [
+ dict(name='central-test', result='SUCCESS', changes='2,1'),
+ dict(name='project-test', result='SUCCESS', changes='2,1'),
+ ]
+ history3 = [
+ dict(name='central-test', result='SUCCESS', changes='3,1'),
+ dict(name='project-dynamic', result='SUCCESS', changes='3,1'),
+ ]
+ history4 = [
+ dict(name='central-test', result='SUCCESS', changes='2,1'),
+ dict(name='project-test', result='SUCCESS', changes='2,1'),
+ ]
+ self._test_include_branches(history1, history2, history3, history4)
diff --git a/tests/unit/test_web.py b/tests/unit/test_web.py
index 4b696534b..5d38c5b19 100644
--- a/tests/unit/test_web.py
+++ b/tests/unit/test_web.py
@@ -28,6 +28,7 @@ from unittest import skip
import requests
from zuul.lib.statsd import normalize_statsd_name
+from zuul.zk.locks import tenant_write_lock
import zuul.web
from tests.base import ZuulTestCase, AnsibleZuulTestCase
@@ -259,6 +260,8 @@ class TestWeb(BaseTestWeb):
def test_web_tenants(self):
"Test that we can retrieve JSON status info"
+ # Disable tenant list caching
+ self.web.web.api.cache_expiry = 0
self.add_base_changes()
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -315,6 +318,7 @@ class TestWeb(BaseTestWeb):
'baseurl': 'https://review.example.com',
'canonical_hostname': 'review.example.com',
'server': 'review.example.com',
+ 'ssh_server': 'review.example.com',
'port': 29418,
}
self.assertEqual([connection], data)
@@ -371,6 +375,7 @@ class TestWeb(BaseTestWeb):
'ansible_version': None,
'attempts': 4,
'branches': [],
+ 'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
@@ -421,6 +426,7 @@ class TestWeb(BaseTestWeb):
'ansible_version': None,
'attempts': 3,
'branches': ['stable'],
+ 'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
@@ -475,6 +481,7 @@ class TestWeb(BaseTestWeb):
'ansible_version': None,
'attempts': 3,
'branches': [],
+ 'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
@@ -598,6 +605,7 @@ class TestWeb(BaseTestWeb):
'ansible_version': None,
'attempts': 3,
'branches': [],
+ 'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
@@ -636,6 +644,7 @@ class TestWeb(BaseTestWeb):
'ansible_version': None,
'attempts': 3,
'branches': [],
+ 'deduplicate': 'auto',
'dependencies': [{'name': 'project-merge',
'soft': False}],
'description': None,
@@ -675,6 +684,7 @@ class TestWeb(BaseTestWeb):
'ansible_version': None,
'attempts': 3,
'branches': [],
+ 'deduplicate': 'auto',
'dependencies': [{'name': 'project-merge',
'soft': False}],
'description': None,
@@ -714,6 +724,7 @@ class TestWeb(BaseTestWeb):
'ansible_version': None,
'attempts': 3,
'branches': [],
+ 'deduplicate': 'auto',
'dependencies': [{'name': 'project-merge',
'soft': False}],
'description': None,
@@ -778,6 +789,7 @@ class TestWeb(BaseTestWeb):
'ansible_version': None,
'attempts': 3,
'branches': [],
+ 'deduplicate': 'auto',
'dependencies': [],
'description': None,
'files': [],
@@ -929,6 +941,11 @@ class TestWeb(BaseTestWeb):
self.assertEqual("reason text", ah_request['reason'])
self.assertEqual([], ah_request['nodes'])
+ # Unknown tenants return 404
+ resp = self.get_url(
+ "api/tenant/tenant-fifty/autohold")
+ self.assertEqual(404, resp.status_code, resp.text)
+
def test_admin_routes_404_by_default(self):
resp = self.post_url(
"api/tenant/tenant-one/project/org/project/autohold",
@@ -1227,6 +1244,94 @@ class TestWebStatusDisplayBranch(BaseTestWeb):
class TestWebMultiTenant(BaseTestWeb):
tenant_config_file = 'config/multi-tenant/main.yaml'
+ def test_tenant_reconfigure_command(self):
+ # The 'zuul-scheduler tenant-reconfigure' and full-reconfigure
+ # are used to correct problems, and as such they clear the
+ # branch cache. Until the reconfiguration is complete,
+ # zuul-web will be unable to load configuration for any tenant
+ # which has projects that have been cleared from the branch
+ # cache. This test verifies that we retry that operation
+ # after encountering missing branch errors.
+ sched = self.scheds.first.sched
+ web = self.web.web
+ # Don't perform any automatic config updates on zuul web so
+ # that we can control the sequencing.
+ self.web.web._system_config_running = False
+ self.web.web.system_config_cache_wake_event.set()
+ self.web.web.system_config_thread.join()
+
+ first_state = sched.tenant_layout_state.get('tenant-one')
+ self.assertEqual(first_state,
+ web.local_layout_state.get('tenant-one'))
+
+ data = self.get_url('api/tenant/tenant-one/jobs').json()
+ self.assertEqual(len(data), 4)
+
+ # Reconfigure tenant-one so that the layout state will be
+ # different and we can start a layout update in zuul-web
+ # later.
+ self.log.debug("Reconfigure tenant-one")
+ self.scheds.first.tenantReconfigure(['tenant-one'])
+ self.waitUntilSettled()
+ self.log.debug("Done reconfigure tenant-one")
+
+ second_state = sched.tenant_layout_state.get('tenant-one')
+ self.assertEqual(second_state,
+ sched.local_layout_state.get('tenant-one'))
+ self.assertEqual(first_state,
+ web.local_layout_state.get('tenant-one'))
+
+ self.log.debug("Grab write lock for tenant-two")
+ with tenant_write_lock(self.zk_client, 'tenant-two') as lock:
+ # Start a reconfiguration of tenant-two; allow it to
+ # proceed past the point that the branch cache is cleared
+ # and is waiting on the lock we hold.
+ self.scheds.first.tenantReconfigure(
+ ['tenant-two'], command_socket=True)
+ for _ in iterate_timeout(30, "reconfiguration to start"):
+ if 'RECONFIG' in lock.contenders():
+ break
+ # Now that the branch cache is cleared as part of the
+ # tenant-two reconfiguration, allow zuul-web to
+ # reconfigure tenant-one. This should produce an error
+ # because of the missing branch cache.
+ self.log.debug("Web update layout 1")
+ self.web.web.updateSystemConfig()
+ self.assertFalse(self.web.web.updateLayout())
+ self.log.debug("Web update layout done")
+
+ self.assertEqual(second_state,
+ sched.local_layout_state.get('tenant-one'))
+ self.assertEqual(first_state,
+ web.local_layout_state.get('tenant-one'))
+
+ # Make sure we can still access tenant-one's config via
+ # zuul-web
+ data = self.get_url('api/tenant/tenant-one/jobs').json()
+ self.assertEqual(len(data), 4)
+ self.log.debug("Release write lock for tenant-two")
+ for _ in iterate_timeout(30, "reconfiguration to finish"):
+ if 'RECONFIG' not in lock.contenders():
+ break
+
+ self.log.debug("Web update layout 2")
+ self.web.web.updateSystemConfig()
+ self.web.web.updateLayout()
+ self.log.debug("Web update layout done")
+
+ # Depending on tenant order, we may need to run one more time
+ self.log.debug("Web update layout 3")
+ self.web.web.updateSystemConfig()
+ self.assertTrue(self.web.web.updateLayout())
+ self.log.debug("Web update layout done")
+
+ self.assertEqual(second_state,
+ sched.local_layout_state.get('tenant-one'))
+ self.assertEqual(second_state,
+ web.local_layout_state.get('tenant-one'))
+ data = self.get_url('api/tenant/tenant-one/jobs').json()
+ self.assertEqual(len(data), 4)
+
def test_web_labels_allowed_list(self):
labels = ["tenant-one-label", "fake", "tenant-two-label"]
self.fake_nodepool.registerLauncher(labels, "FakeLauncher2")
@@ -1241,6 +1346,8 @@ class TestWebMultiTenant(BaseTestWeb):
def test_tenant_add_remove(self):
"Test that tenants are correctly added/removed to/from the layout"
+ # Disable tenant list caching
+ self.web.web.api.cache_expiry = 0
resp = self.get_url("api/tenants")
data = resp.json()
self.assertEqual(sorted(d["name"] for d in data),
@@ -1278,6 +1385,14 @@ class TestEmptyConfig(BaseTestWeb):
'config/empty-config/git/common-config/new-zuul.yaml')
self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
+
+ layout_scheduler = self.scheds.first.sched.local_layout_state.get(
+ 'tenant-one')
+ for _ in iterate_timeout(10, "local layout of zuul-web to be updated"):
+ layout_web = self.web.web.local_layout_state.get('tenant-one')
+ if layout_web == layout_scheduler:
+ break
+
resp = self.get_url("api/tenant/tenant-one/jobs").json()
self.assertEqual(len(resp), 3)
@@ -2852,7 +2967,7 @@ class TestHeldAttributeInBuildInfo(BaseTestWeb):
class TestWebMulti(BaseTestWeb):
- config_file = 'zuul-gerrit-github.conf'
+ config_file = 'zuul-gerrit-ssh.conf'
def test_web_connections_list_multi(self):
data = self.get_url('api/connections').json()
@@ -2864,6 +2979,7 @@ class TestWebMulti(BaseTestWeb):
'baseurl': url,
'canonical_hostname': 'review.example.com',
'server': 'review.example.com',
+ 'ssh_server': 'ssh-review.example.com',
'port': 29418,
}
github_connection = {
@@ -2872,6 +2988,7 @@ class TestWebMulti(BaseTestWeb):
'driver': 'github',
'name': 'github',
'server': 'github.com',
+ 'repo_cache': None,
}
self.assertEqual([gerrit_connection, github_connection], data)
@@ -2892,7 +3009,7 @@ class TestCLIViaWebApi(BaseTestWeb):
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'autohold', '--reason', 'some reason',
'--tenant', 'tenant-one', '--project', 'org/project',
@@ -2931,7 +3048,7 @@ class TestCLIViaWebApi(BaseTestWeb):
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'enqueue', '--tenant', 'tenant-one',
'--project', 'org/project',
@@ -2960,7 +3077,7 @@ class TestCLIViaWebApi(BaseTestWeb):
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'enqueue-ref', '--tenant', 'tenant-one',
'--project', 'org/project',
@@ -2999,7 +3116,7 @@ class TestCLIViaWebApi(BaseTestWeb):
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'dequeue', '--tenant', 'tenant-one', '--project', 'org/project',
'--pipeline', 'periodic', '--ref', 'refs/heads/stable'],
@@ -3050,7 +3167,7 @@ class TestCLIViaWebApi(BaseTestWeb):
token = jwt.encode(authz, key='NoDanaOnlyZuul',
algorithm='HS256')
p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul'),
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
'--zuul-url', self.base_url, '--auth-token', token,
'promote', '--tenant', 'tenant-one',
'--pipeline', 'gate', '--changes', '2,1', '3,1'],
diff --git a/tools/deprecated-queues.py b/tools/deprecated-queues.py
new file mode 100644
index 000000000..dfd9df497
--- /dev/null
+++ b/tools/deprecated-queues.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+# Copyright 2022 Acme Gating, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import requests
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Find where a project declares a queue")
+ parser.add_argument("url", help="Zuul URL")
+ parser.add_argument("tenant", help="Zuul tenant name")
+ parser.add_argument("--verbose", help="Display progress",
+ action='store_true')
+ args = parser.parse_args()
+
+ projects = requests.get(
+ f'{args.url}/api/tenant/{args.tenant}/projects',
+ ).json()
+
+ pipeline_contexts = set()
+ for tenant_project in projects:
+ if args.verbose:
+ print(f"Checking {tenant_project['name']}")
+ project = requests.get(
+ f"{args.url}/api/tenant/{args.tenant}/project/"
+ f"{tenant_project['name']}",
+ ).json()
+
+ for config in project['configs']:
+ for pipeline in config['pipelines']:
+ if pipeline['queue_name']:
+ pipeline_contexts.add(repr(config['source_context']))
+
+ if pipeline_contexts:
+ print("The following project-pipeline stanzas define a queue.")
+ print("This syntax is deprecated and queue definitions should")
+ print("be moved to the project level.")
+ print("See https://zuul-ci.org/docs/zuul/latest/"
+ "releasenotes.html#relnotes-4-1-0-deprecation-notes")
+ for c in pipeline_contexts:
+ print(c)
+ else:
+ print("Good, no project-pipeline queue definitions found.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/test-logs.sh b/tools/test-logs.sh
index d71b9b7f9..046743aca 100755
--- a/tools/test-logs.sh
+++ b/tools/test-logs.sh
@@ -66,12 +66,12 @@ gathering = smart
gather_subset = !all
fact_caching = jsonfile
fact_caching_connection = ~/.cache/facts
-lookup_plugins = ${ZUUL_ANSIBLE}/zuul/ansible/lookup
-callback_plugins = ${ZUUL_ANSIBLE}/zuul/ansible/callback:$ARA_DIR/plugins/callbacks
-action_plugins = ${ZUUL_ANSIBLE}/zuul/ansible/actiongeneral:${ZUUL_ANSIBLE}/zuul/ansible/actiontrusted
-module_utils = ${ZUUL_ANSIBLE}/zuul/ansible/module_utils
+lookup_plugins = ${ZUUL_ANSIBLE}/zuul/ansible/base/lookup
+callback_plugins = ${ZUUL_ANSIBLE}/zuul/ansible/base/callback:$ARA_DIR/plugins/callbacks
+action_plugins = ${ZUUL_ANSIBLE}/zuul/ansible/base/action
+module_utils = ${ZUUL_ANSIBLE}/zuul/ansible/base/module_utils
stdout_callback = zuul_stream
-library = ${ZUUL_ANSIBLE}/zuul/ansible/library
+library = ${ZUUL_ANSIBLE}/zuul/ansible/base/library
retry_files_enabled = False
EOF
diff --git a/web/src/containers/logfile/LogFile.jsx b/web/src/containers/logfile/LogFile.jsx
index f8b198450..7184646bd 100644
--- a/web/src/containers/logfile/LogFile.jsx
+++ b/web/src/containers/logfile/LogFile.jsx
@@ -59,6 +59,11 @@ export default function LogFile({
// Only highlight the lines if the log is present (otherwise it doesn't make
// sense). Although, scrolling to the selected section only works once the
// necessary log lines are part of the DOM tree.
+ // Additionally note that if we set highlightStart before the page content
+ // is available then the window scrolling won't match any lines and we won't
+ // scroll. Then when we try to set highlightStart after page content is loaded
+ // the value isn't different than what is set previously preventing the
+ // scroll event from firing.
if (!isFetching) {
// Get the line numbers to highlight from the URL and directly cast them to
// a number. The substring(1) removes the '#' character.
diff --git a/web/src/reducers/initialState.js b/web/src/reducers/initialState.js
index ce74956e9..d2e306032 100644
--- a/web/src/reducers/initialState.js
+++ b/web/src/reducers/initialState.js
@@ -23,7 +23,7 @@ export default {
logfile: {
// Store files by buildId->filename->content
files: {},
- isFetching: false,
+ isFetching: true,
url: null,
},
auth: {},
diff --git a/zuul/ansible/2.8/library/command.py b/zuul/ansible/2.8/library/command.py
index f4511ad18..26830e4a7 100755
--- a/zuul/ansible/2.8/library/command.py
+++ b/zuul/ansible/2.8/library/command.py
@@ -493,17 +493,16 @@ def zuul_run_command(self, args, zuul_log_id, check_rc=False, close_fds=True, ex
# ZUUL: store fail_json_kwargs and fail later in finally
fail_json_kwargs = dict(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
finally:
- if t:
- with Console(zuul_log_id) as console:
- if t.is_alive():
- console.addLine("[Zuul] standard output/error still open "
- "after child exited")
- if fail_json_kwargs:
- # we hit an exception and need to use the rc from
- # fail_json_kwargs
- rc = fail_json_kwargs['rc']
-
- console.addLine("[Zuul] Task exit code: %s\n" % rc)
+ with Console(zuul_log_id) as console:
+ if t and t.is_alive():
+ console.addLine("[Zuul] standard output/error still open "
+ "after child exited")
+ if fail_json_kwargs:
+ # we hit an exception and need to use the rc from
+ # fail_json_kwargs
+ rc = fail_json_kwargs['rc']
+
+ console.addLine("[Zuul] Task exit code: %s\n" % rc)
if fail_json_kwargs:
self.fail_json(**fail_json_kwargs)
diff --git a/zuul/ansible/2.9/library/command.py b/zuul/ansible/2.9/library/command.py
index f4511ad18..26830e4a7 100755
--- a/zuul/ansible/2.9/library/command.py
+++ b/zuul/ansible/2.9/library/command.py
@@ -493,17 +493,16 @@ def zuul_run_command(self, args, zuul_log_id, check_rc=False, close_fds=True, ex
# ZUUL: store fail_json_kwargs and fail later in finally
fail_json_kwargs = dict(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
finally:
- if t:
- with Console(zuul_log_id) as console:
- if t.is_alive():
- console.addLine("[Zuul] standard output/error still open "
- "after child exited")
- if fail_json_kwargs:
- # we hit an exception and need to use the rc from
- # fail_json_kwargs
- rc = fail_json_kwargs['rc']
-
- console.addLine("[Zuul] Task exit code: %s\n" % rc)
+ with Console(zuul_log_id) as console:
+ if t and t.is_alive():
+ console.addLine("[Zuul] standard output/error still open "
+ "after child exited")
+ if fail_json_kwargs:
+ # we hit an exception and need to use the rc from
+ # fail_json_kwargs
+ rc = fail_json_kwargs['rc']
+
+ console.addLine("[Zuul] Task exit code: %s\n" % rc)
if fail_json_kwargs:
self.fail_json(**fail_json_kwargs)
diff --git a/zuul/ansible/base/action/command.py b/zuul/ansible/base/action/command.py
index 52202bff9..f36da86f6 100644
--- a/zuul/ansible/base/action/command.py
+++ b/zuul/ansible/base/action/command.py
@@ -22,10 +22,18 @@ class ActionModule(command.ActionModule):
def run(self, tmp=None, task_vars=None):
# we need the zuul_log_id on shell and command tasks
- host = paths._sanitize_filename(task_vars.get('inventory_hostname'))
if self._task.action in (
'command', 'shell',
'ansible.builtin.command', 'ansible.builtin.shell'):
- self._task.args['zuul_log_id'] = "%s-%s" % (self._task._uuid, host)
-
+ # Get a unique key for ZUUL_LOG_ID_MAP. ZUUL_LOG_ID_MAP
+ # is read-only since we are forked. Use it to add a
+ # counter to the log id so that if we run the same task
+ # more than once, we get a unique log file. See comments
+ # in paths.py for details.
+ log_host = paths._sanitize_filename(
+ task_vars.get('inventory_hostname'))
+ key = "%s-%s" % (self._task._uuid, log_host)
+ count = paths.ZUUL_LOG_ID_MAP.get(key, 0)
+ self._task.args['zuul_log_id'] = "%s-%s-%s" % (
+ self._task._uuid, count, log_host)
return super(ActionModule, self).run(tmp, task_vars)
diff --git a/zuul/ansible/base/callback/zuul_stream.py b/zuul/ansible/base/callback/zuul_stream.py
index 09cc16621..184dbe78f 100644
--- a/zuul/ansible/base/callback/zuul_stream.py
+++ b/zuul/ansible/base/callback/zuul_stream.py
@@ -261,7 +261,8 @@ class CallbackModule(default.CallbackModule):
if task.async_val:
# Don't try to stream from async tasks
return
- if task.action in ('command', 'shell'):
+ if task.action in ('command', 'shell',
+ 'ansible.builtin.command', 'ansible.builtin.shell'):
play_vars = self._play._variable_manager._hostvars
hosts = self._get_task_hosts(task)
@@ -290,8 +291,17 @@ class CallbackModule(default.CallbackModule):
continue
ip = '127.0.0.1'
- log_id = "%s-%s" % (
- task._uuid, paths._sanitize_filename(inventory_hostname))
+ # Get a unique key for ZUUL_LOG_ID_MAP. Use it to add
+ # a counter to the log id so that if we run the same
+ # task more than once, we get a unique log file. See
+ # comments in paths.py for details.
+ log_host = paths._sanitize_filename(inventory_hostname)
+ key = "%s-%s" % (task._uuid, log_host)
+ count = paths.ZUUL_LOG_ID_MAP.get(key, 0) + 1
+ paths.ZUUL_LOG_ID_MAP[key] = count
+ log_id = "%s-%s-%s" % (
+ task._uuid, count, log_host)
+
streamer = threading.Thread(
target=self._read_log, args=(
host, ip, port, log_id, task_name, hosts))
diff --git a/zuul/ansible/base/library/command.py b/zuul/ansible/base/library/command.py
index ced439747..d496b037c 100755
--- a/zuul/ansible/base/library/command.py
+++ b/zuul/ansible/base/library/command.py
@@ -545,17 +545,16 @@ def zuul_run_command(self, args, zuul_log_id, check_rc=False, close_fds=True, ex
# ZUUL: store fail_json_kwargs and fail later in finally
fail_json_kwargs = dict(rc=257, stdout=b'', stderr=b'', msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
finally:
- if t:
- with Console(zuul_log_id) as console:
- if t.is_alive():
- console.addLine("[Zuul] standard output/error still open "
- "after child exited")
- if fail_json_kwargs:
- # we hit an exception and need to use the rc from
- # fail_json_kwargs
- rc = fail_json_kwargs['rc']
-
- console.addLine("[Zuul] Task exit code: %s\n" % rc)
+ with Console(zuul_log_id) as console:
+ if t and t.is_alive():
+ console.addLine("[Zuul] standard output/error still open "
+ "after child exited")
+ if fail_json_kwargs:
+ # we hit an exception and need to use the rc from
+ # fail_json_kwargs
+ rc = fail_json_kwargs['rc']
+
+ console.addLine("[Zuul] Task exit code: %s\n" % rc)
if fail_json_kwargs:
self.fail_json(**fail_json_kwargs)
diff --git a/zuul/ansible/paths.py b/zuul/ansible/paths.py
index 229c16a71..895e31403 100644
--- a/zuul/ansible/paths.py
+++ b/zuul/ansible/paths.py
@@ -52,3 +52,20 @@ def _import_ansible_action_plugin(name):
def _sanitize_filename(name):
return ''.join(c for c in name if c.isalnum())
+
+
+# Ansible assigns a unique id to every task (Task._uuid). However, if
+# a role is included more than once, the task object is re-used. In
+# order to provide unique log ids for the Zuul command log streaming
+# system, this global dictionary is used to map keys that are derived
+# from tasks (task._uuid concatenated with the host name) to a counter
+# which is incremented each time the task+host combination is
+# encountered. Ansible will not run more than one task on a host
+# simultaneously, so this should be sufficiently unique to avoid
+# collisions.
+#
+# We use a global dictionary defined here so that zuul_stream can
+# write to it and zuul.ansible.command modules can read it. Note that
+# the command module operates after a fork and therefore it should be
+# treated as read-only there.
+ZUUL_LOG_ID_MAP = {}
diff --git a/zuul/cmd/client.py b/zuul/cmd/client.py
index fc1ea6a2e..490e47c59 100755
--- a/zuul/cmd/client.py
+++ b/zuul/cmd/client.py
@@ -1,5 +1,6 @@
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2013 OpenStack Foundation
+# Copyright 2022 Acme Gating, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -16,6 +17,8 @@
import argparse
import babel.dates
import datetime
+import dateutil.parser
+import dateutil.tz
import json
import jwt
import logging
@@ -40,6 +43,28 @@ from zuul.zk.layout import LayoutState, LayoutStateStore
from zuul.zk.components import COMPONENT_REGISTRY
+def parse_cutoff(now, before, older_than):
+ if before and not older_than:
+ cutoff = dateutil.parser.parse(before)
+ if cutoff.tzinfo and cutoff.tzinfo != dateutil.tz.tzutc():
+ raise RuntimeError("Timestamp must be specified as UTC")
+ cutoff = cutoff.replace(tzinfo=dateutil.tz.tzutc())
+ return cutoff
+ elif older_than and not before:
+ value = older_than[:-1]
+ suffix = older_than[-1]
+ if suffix == 'd':
+ delta = datetime.timedelta(days=int(value))
+ elif suffix == 'h':
+ delta = datetime.timedelta(hours=int(value))
+ else:
+ raise RuntimeError("Unsupported relative time")
+ return now - delta
+ else:
+ raise RuntimeError(
+ "Either --before or --older-than must be supplied")
+
+
# todo This should probably live somewhere else
class ZuulRESTClient(object):
"""Basic client for Zuul's REST API"""
@@ -189,7 +214,8 @@ class Client(zuul.cmd.ZuulApp):
# Autohold
cmd_autohold = subparsers.add_parser(
- 'autohold', help='hold nodes for failed job')
+ 'autohold', help='[DEPRECATED - use zuul-client] '
+ 'hold nodes for failed job')
cmd_autohold.add_argument('--tenant', help='tenant name',
required=True)
cmd_autohold.add_argument('--project', help='project name',
@@ -214,25 +240,30 @@ class Client(zuul.cmd.ZuulApp):
cmd_autohold.set_defaults(func=self.autohold)
cmd_autohold_delete = subparsers.add_parser(
- 'autohold-delete', help='delete autohold request')
+ 'autohold-delete', help='[DEPRECATED - use zuul-client] '
+ 'delete autohold request')
cmd_autohold_delete.set_defaults(func=self.autohold_delete)
cmd_autohold_delete.add_argument('id', metavar='REQUEST_ID',
help='the hold request ID')
cmd_autohold_info = subparsers.add_parser(
- 'autohold-info', help='retrieve autohold request detailed info')
+ 'autohold-info', help='[DEPRECATED - use zuul-client] '
+ 'retrieve autohold request detailed info')
cmd_autohold_info.set_defaults(func=self.autohold_info)
cmd_autohold_info.add_argument('id', metavar='REQUEST_ID',
help='the hold request ID')
cmd_autohold_list = subparsers.add_parser(
- 'autohold-list', help='list autohold requests')
+ 'autohold-list', help='[DEPRECATED - use zuul-client] '
+ 'list autohold requests')
cmd_autohold_list.add_argument('--tenant', help='tenant name',
required=True)
cmd_autohold_list.set_defaults(func=self.autohold_list)
# Enqueue/Dequeue
- cmd_enqueue = subparsers.add_parser('enqueue', help='enqueue a change')
+ cmd_enqueue = subparsers.add_parser(
+ 'enqueue',
+ help='[DEPRECATED - use zuul-client] enqueue a change')
cmd_enqueue.add_argument('--tenant', help='tenant name',
required=True)
# TODO(mhu) remove in a few releases
@@ -249,7 +280,8 @@ class Client(zuul.cmd.ZuulApp):
cmd_enqueue.set_defaults(func=self.enqueue)
cmd_enqueue = subparsers.add_parser(
- 'enqueue-ref', help='enqueue a ref',
+ 'enqueue-ref',
+ help='[DEPRECATED - use zuul-client] enqueue a ref',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Submit a trigger event
@@ -273,9 +305,11 @@ class Client(zuul.cmd.ZuulApp):
'--newrev', help='new revision', default=None)
cmd_enqueue.set_defaults(func=self.enqueue_ref)
- cmd_dequeue = subparsers.add_parser('dequeue',
- help='dequeue a buildset by its '
- 'change or ref')
+ cmd_dequeue = subparsers.add_parser(
+ 'dequeue',
+ help='[DEPRECATED - use zuul-client] '
+ 'dequeue a buildset by its '
+ 'change or ref')
cmd_dequeue.add_argument('--tenant', help='tenant name',
required=True)
cmd_dequeue.add_argument('--pipeline', help='pipeline name',
@@ -289,8 +323,10 @@ class Client(zuul.cmd.ZuulApp):
cmd_dequeue.set_defaults(func=self.dequeue)
# Promote
- cmd_promote = subparsers.add_parser('promote',
- help='promote one or more changes')
+ cmd_promote = subparsers.add_parser(
+ 'promote',
+ help='[DEPRECATED - use zuul-client] '
+ 'promote one or more changes')
cmd_promote.add_argument('--tenant', help='tenant name',
required=True)
cmd_promote.add_argument('--pipeline', help='pipeline name',
@@ -300,8 +336,10 @@ class Client(zuul.cmd.ZuulApp):
cmd_promote.set_defaults(func=self.promote)
# Show
- cmd_show = subparsers.add_parser('show',
- help='show current statuses')
+ cmd_show = subparsers.add_parser(
+ 'show',
+ help='[DEPRECATED - use zuul-client] '
+ 'show current statuses')
cmd_show.set_defaults(func=self.show_running_jobs)
show_subparsers = cmd_show.add_subparsers(title='show')
show_running_jobs = show_subparsers.add_parser(
@@ -485,6 +523,27 @@ class Client(zuul.cmd.ZuulApp):
help='tenant name')
cmd_delete_pipeline_state.add_argument('pipeline', type=str,
help='pipeline name')
+
+ # DB Maintenance
+ cmd_prune_database = subparsers.add_parser(
+ 'prune-database',
+ help='prune old database entries',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=textwrap.dedent('''\
+ Prune old database entries
+
+ This command will delete database entries older than the
+ specified cutoff (which can be specified as either an
+ absolute or relative time).'''))
+ cmd_prune_database.set_defaults(command='prune-database')
+ cmd_prune_database.add_argument(
+ '--before',
+ help='absolute timestamp (e.g., "2022-01-31 12:00:00")')
+ cmd_prune_database.add_argument(
+ '--older-than',
+ help='relative time (e.g., "24h" or "180d")')
+ cmd_prune_database.set_defaults(func=self.prune_database)
+
return parser
def parseArguments(self, args=None):
@@ -521,7 +580,13 @@ class Client(zuul.cmd.ZuulApp):
if not self.args.zuul_url:
self.readConfig()
self.setup_logging()
-
+ if self.args.func in [self.autohold, self.autohold_delete,
+ self.enqueue, self.enqueue_ref,
+ self.dequeue, self.promote]:
+ print(
+ "Warning: this command is deprecated with zuul-admin, "
+ "please use `zuul-client` instead",
+ file=sys.stderr)
if self.args.func():
sys.exit(0)
else:
@@ -544,7 +609,7 @@ class Client(zuul.cmd.ZuulApp):
self.args.auth_token)
else:
print('Unable to find a way to connect to Zuul, add a '
- '"web" section to your configuration file')
+ '"webclient" section to your configuration file')
sys.exit(1)
if server is None:
print('Missing "server" configuration value')
@@ -988,6 +1053,21 @@ class Client(zuul.cmd.ZuulApp):
sys.exit(0)
+ def prune_database(self):
+ logging.basicConfig(level=logging.INFO)
+ args = self.args
+ now = datetime.datetime.now(dateutil.tz.tzutc())
+ cutoff = parse_cutoff(now, args.before, args.older_than)
+ self.configure_connections(source_only=False, require_sql=True)
+ connection = self.connections.getSqlConnection()
+ connection.deleteBuildsets(cutoff)
+ sys.exit(0)
+
def main():
+ if sys.argv[0].endswith('zuul'):
+ print(
+ "Warning: this command name is deprecated, "
+ "use `zuul-admin` instead",
+ file=sys.stderr)
Client().main()
diff --git a/zuul/cmd/executor.py b/zuul/cmd/executor.py
index 3c98ecd4a..eb463348e 100755
--- a/zuul/cmd/executor.py
+++ b/zuul/cmd/executor.py
@@ -67,12 +67,16 @@ class Executor(zuul.cmd.ZuulDaemonApp):
# Keep running until the parent dies:
pipe_read = os.fdopen(pipe_read)
- pipe_read.read()
+ try:
+ pipe_read.read()
+ except KeyboardInterrupt:
+ pass
self.log.info("Stopping log streamer")
streamer.stop()
os._exit(0)
else:
os.close(pipe_read)
+ self.log_streamer_pipe = pipe_write
self.log_streamer_pid = child_pid
def run(self):
@@ -113,7 +117,16 @@ class Executor(zuul.cmd.ZuulDaemonApp):
if self.args.nodaemon:
signal.signal(signal.SIGTERM, self.exit_handler)
- self.executor.join()
+ while True:
+ try:
+ self.executor.join()
+ break
+ except KeyboardInterrupt:
+ print("Ctrl + C: asking executor to exit nicely...\n")
+ self.exit_handler(signal.SIGINT, None)
+
+ os.close(self.log_streamer_pipe)
+ os.waitpid(self.log_streamer_pid, 0)
def main():
diff --git a/zuul/cmd/fingergw.py b/zuul/cmd/fingergw.py
index 04e0d5cc8..2f7dbdb59 100644
--- a/zuul/cmd/fingergw.py
+++ b/zuul/cmd/fingergw.py
@@ -38,6 +38,10 @@ class FingerGatewayApp(zuul.cmd.ZuulDaemonApp):
self.addSubCommands(parser, fingergw.COMMANDS)
return parser
+ def exit_handler(self, signum, frame):
+ if self.gateway:
+ self.gateway.stop()
+
def run(self):
'''
Main entry point for the FingerGatewayApp.
@@ -59,28 +63,18 @@ class FingerGatewayApp(zuul.cmd.ZuulDaemonApp):
self.getPidFile(),
)
- self.log.info('Starting Zuul finger gateway app')
self.gateway.start()
if self.args.nodaemon:
- # NOTE(Shrews): When running in non-daemon mode, although sending
- # the 'stop' command via the command socket will shutdown the
- # gateway, it's still necessary to Ctrl+C to stop the app.
- while True:
- try:
- signal.pause()
- except KeyboardInterrupt:
- print("Ctrl + C: asking gateway to exit nicely...\n")
- self.stop()
- break
- else:
- self.gateway.wait()
-
- self.log.info('Stopped Zuul finger gateway app')
-
- def stop(self):
- if self.gateway:
- self.gateway.stop()
+ signal.signal(signal.SIGTERM, self.exit_handler)
+
+ while True:
+ try:
+ self.gateway.join()
+ break
+ except KeyboardInterrupt:
+ print("Ctrl + C: asking gateway to exit nicely...\n")
+ self.exit_handler(signal.SIGINT, None)
def main():
diff --git a/zuul/cmd/merger.py b/zuul/cmd/merger.py
index 659399c85..f1e0bea88 100755
--- a/zuul/cmd/merger.py
+++ b/zuul/cmd/merger.py
@@ -49,19 +49,15 @@ class Merger(zuul.cmd.ZuulDaemonApp):
if self.args.nodaemon:
signal.signal(signal.SIGTERM, self.exit_handler)
- while True:
- try:
- signal.pause()
- except KeyboardInterrupt:
- print("Ctrl + C: asking merger to exit nicely...\n")
- self.exit_handler(signal.SIGINT, None)
- else:
- self.merger.join()
+
+ while True:
+ try:
+ self.merger.join()
+ break
+ except KeyboardInterrupt:
+ print("Ctrl + C: asking merger to exit nicely...\n")
+ self.exit_handler(signal.SIGINT, None)
def main():
Merger().main()
-
-
-if __name__ == "__main__":
- main()
diff --git a/zuul/cmd/scheduler.py b/zuul/cmd/scheduler.py
index 176466cfe..7ed30b68e 100755
--- a/zuul/cmd/scheduler.py
+++ b/zuul/cmd/scheduler.py
@@ -39,6 +39,10 @@ class Scheduler(zuul.cmd.ZuulDaemonApp):
'listed, all tenants will be validated. '
'Note: this requires ZooKeeper and '
'will distribute work to mergers.')
+ parser.add_argument('--wait-for-init', dest='wait_for_init',
+ action='store_true',
+ help='Wait until all tenants are fully loaded '
+ 'before beginning to process events.')
self.addSubCommands(parser, zuul.scheduler.COMMANDS)
return parser
@@ -82,7 +86,8 @@ class Scheduler(zuul.cmd.ZuulDaemonApp):
self.configure_connections(require_sql=True)
self.sched = zuul.scheduler.Scheduler(self.config,
- self.connections, self)
+ self.connections, self,
+ self.args.wait_for_init)
if self.args.validate_tenants is None:
self.connections.registerScheduler(self.sched)
self.connections.load(self.sched.zk_client,
@@ -109,14 +114,14 @@ class Scheduler(zuul.cmd.ZuulDaemonApp):
if self.args.nodaemon:
signal.signal(signal.SIGTERM, self.exit_handler)
- while True:
- try:
- signal.pause()
- except KeyboardInterrupt:
- print("Ctrl + C: asking scheduler to exit nicely...\n")
- self.exit_handler(signal.SIGINT, None)
- else:
- self.sched.join()
+
+ while True:
+ try:
+ self.sched.join()
+ break
+ except KeyboardInterrupt:
+ print("Ctrl + C: asking scheduler to exit nicely...\n")
+ self.exit_handler(signal.SIGINT, None)
def main():
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 9eab376f8..eb468518f 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -12,6 +12,7 @@
import collections
from contextlib import contextmanager
+from concurrent.futures import ThreadPoolExecutor, as_completed
import copy
import itertools
import os
@@ -25,6 +26,7 @@ import voluptuous as vs
from zuul import change_matcher
from zuul import model
+from zuul.connection import ReadOnlyBranchCacheError
from zuul.lib import yamlutil as yaml
import zuul.manager.dependent
import zuul.manager.independent
@@ -189,6 +191,16 @@ class ProjectNotPermittedError(Exception):
super(ProjectNotPermittedError, self).__init__(message)
+class GlobalSemaphoreNotFoundError(Exception):
+ def __init__(self, semaphore):
+ message = textwrap.dedent("""\
+ The global semaphore "{semaphore}" was not found. All
+ global semaphores must be added to the main configuration
+ file by the Zuul administrator.""")
+ message = textwrap.fill(message.format(semaphore=semaphore))
+ super(GlobalSemaphoreNotFoundError, self).__init__(message)
+
+
class YAMLDuplicateKeyError(ConfigurationSyntaxError):
def __init__(self, key, node, context, start_mark):
intro = textwrap.fill(textwrap.dedent("""\
@@ -233,6 +245,8 @@ def project_configuration_exceptions(context, accumulator):
yield
except ConfigurationSyntaxError:
raise
+ except ReadOnlyBranchCacheError:
+ raise
except Exception as e:
intro = textwrap.fill(textwrap.dedent("""\
Zuul encountered an error while accessing the repo {repo}. The error
@@ -633,6 +647,7 @@ class JobParser(object):
'post-review': bool,
'match-on-config-updates': bool,
'workspace-scheme': vs.Any('golang', 'flat', 'unique'),
+ 'deduplicate': vs.Any(bool, 'auto'),
}
job_name = {vs.Required('name'): str}
@@ -658,6 +673,7 @@ class JobParser(object):
'override-checkout',
'match-on-config-updates',
'workspace-scheme',
+ 'deduplicate',
]
def __init__(self, pcontext):
@@ -1211,6 +1227,7 @@ class PipelineParser(object):
'start-message': str,
'merge-conflict-message': str,
'merge-failure-message': str,
+ 'enqueue-message': str,
'no-jobs-message': str,
'footer-message': str,
'dequeue-message': str,
@@ -1448,6 +1465,26 @@ class AuthorizationRuleParser(object):
return a
+class GlobalSemaphoreParser(object):
+ def __init__(self):
+ self.log = logging.getLogger("zuul.GlobalSemaphoreParser")
+ self.schema = self.getSchema()
+
+ def getSchema(self):
+ semaphore = {vs.Required('name'): str,
+ 'max': int,
+ }
+
+ return vs.Schema(semaphore)
+
+ def fromYaml(self, conf):
+ self.schema(conf)
+ semaphore = model.Semaphore(conf['name'], conf.get('max', 1),
+ global_scope=True)
+ semaphore.freeze()
+ return semaphore
+
+
class ParseContext(object):
"""Hold information about a particular run of the parser"""
@@ -1516,6 +1553,9 @@ class TenantParser(object):
'exclude-unprotected-branches': bool,
'extra-config-paths': no_dup_config_paths,
'load-branch': str,
+ 'include-branches': to_list(str),
+ 'exclude-branches': to_list(str),
+ 'always-dynamic-branches': to_list(str),
'allow-circular-dependencies': bool,
}}
@@ -1561,6 +1601,7 @@ class TenantParser(object):
'default-parent': str,
'default-ansible-version': vs.Any(str, float),
'admin-rules': to_list(str),
+ 'semaphores': to_list(str),
'authentication-realm': str,
# TODO: Ignored, allowed for backwards compat, remove for v5.
'report-build-page': bool,
@@ -1568,7 +1609,7 @@ class TenantParser(object):
}
return vs.Schema(tenant)
- def fromYaml(self, abide, conf, ansible_manager, min_ltimes=None,
+ def fromYaml(self, abide, conf, ansible_manager, executor, min_ltimes=None,
layout_uuid=None, branch_cache_min_ltimes=None,
ignore_cat_exception=True):
# Note: This vs schema validation is not necessary in most cases as we
@@ -1593,6 +1634,11 @@ class TenantParser(object):
tenant.authorization_rules = conf['admin-rules']
if conf.get('authentication-realm') is not None:
tenant.default_auth_realm = conf['authentication-realm']
+ if conf.get('semaphores') is not None:
+ tenant.global_semaphores = set(as_list(conf['semaphores']))
+ for semaphore_name in tenant.global_semaphores:
+ if semaphore_name not in abide.semaphores:
+ raise GlobalSemaphoreNotFoundError(semaphore_name)
tenant.web_root = conf.get('web-root', self.globals.web_root)
if tenant.web_root and not tenant.web_root.endswith('/'):
tenant.web_root += '/'
@@ -1614,7 +1660,15 @@ class TenantParser(object):
# We prepare a stack to store config loading issues
loading_errors = model.LoadingErrors()
+ # Get branches in parallel
+ branch_futures = {}
for tpc in config_tpcs + untrusted_tpcs:
+ future = executor.submit(self._getProjectBranches,
+ tenant, tpc, branch_cache_min_ltimes)
+ branch_futures[future] = tpc
+
+ for branch_future in as_completed(branch_futures.keys()):
+ tpc = branch_futures[branch_future]
source_context = model.ProjectContext(
tpc.project.canonical_name, tpc.project.name)
with project_configuration_exceptions(source_context,
@@ -1637,7 +1691,7 @@ class TenantParser(object):
# already cached. Full reconfigurations start with an empty
# cache.
self._cacheTenantYAML(abide, tenant, loading_errors, min_ltimes,
- ignore_cat_exception)
+ executor, ignore_cat_exception)
# Then collect the appropriate YAML based on this tenant
# config.
@@ -1665,7 +1719,7 @@ class TenantParser(object):
if self.scheduler:
tenant.semaphore_handler = SemaphoreHandler(
- self.zk_client, self.statsd, tenant.name, tenant.layout
+ self.zk_client, self.statsd, tenant.name, tenant.layout, abide
)
# Only call the postConfig hook if we have a scheduler as this will
# change data in ZooKeeper. In case we are in a zuul-web context,
@@ -1701,7 +1755,15 @@ class TenantParser(object):
if 'master' in branches:
branches.remove('master')
branches = ['master'] + branches
- tpc.branches = branches
+ static_branches = []
+ always_dynamic_branches = []
+ for b in branches:
+ if tpc.includesBranch(b):
+ static_branches.append(b)
+ elif tpc.isAlwaysDynamicBranch(b):
+ always_dynamic_branches.append(b)
+ tpc.branches = static_branches
+ tpc.dynamic_branches = always_dynamic_branches
def _loadProjectKeys(self, connection_name, project):
project.private_secrets_key, project.public_secrets_key = (
@@ -1725,6 +1787,9 @@ class TenantParser(object):
project_include = current_include
shadow_projects = []
project_exclude_unprotected_branches = None
+ project_include_branches = None
+ project_exclude_branches = None
+ project_always_dynamic_branches = None
project_load_branch = None
else:
project_name = list(conf.keys())[0]
@@ -1743,6 +1808,34 @@ class TenantParser(object):
project_include = frozenset(project_include - project_exclude)
project_exclude_unprotected_branches = conf[project_name].get(
'exclude-unprotected-branches', None)
+ project_include_branches = conf[project_name].get(
+ 'include-branches', None)
+ if project_include_branches is not None:
+ project_include_branches = [
+ re.compile(b) for b in as_list(project_include_branches)
+ ]
+ exclude_branches = conf[project_name].get(
+ 'exclude-branches', None)
+ if exclude_branches is not None:
+ project_exclude_branches = [
+ re.compile(b) for b in as_list(exclude_branches)
+ ]
+ else:
+ project_exclude_branches = None
+ always_dynamic_branches = conf[project_name].get(
+ 'always-dynamic-branches', None)
+ if always_dynamic_branches is not None:
+ if project_exclude_branches is None:
+ project_exclude_branches = []
+ exclude_branches = []
+ project_always_dynamic_branches = []
+ for b in always_dynamic_branches:
+ rb = re.compile(b)
+ if b not in exclude_branches:
+ project_exclude_branches.append(rb)
+ project_always_dynamic_branches.append(rb)
+ else:
+ project_always_dynamic_branches = None
if conf[project_name].get('extra-config-paths') is not None:
extra_config_paths = as_list(
conf[project_name]['extra-config-paths'])
@@ -1758,6 +1851,10 @@ class TenantParser(object):
tenant_project_config.shadow_projects = shadow_projects
tenant_project_config.exclude_unprotected_branches = \
project_exclude_unprotected_branches
+ tenant_project_config.include_branches = project_include_branches
+ tenant_project_config.exclude_branches = project_exclude_branches
+ tenant_project_config.always_dynamic_branches = \
+ project_always_dynamic_branches
tenant_project_config.extra_config_files = extra_config_files
tenant_project_config.extra_config_dirs = extra_config_dirs
tenant_project_config.load_branch = project_load_branch
@@ -1791,7 +1888,7 @@ class TenantParser(object):
raise Exception("Unable to parse project %s", conf)
return projects
- def loadTenantProjects(self, conf_tenant):
+ def loadTenantProjects(self, conf_tenant, executor):
config_projects = []
untrusted_projects = []
@@ -1799,6 +1896,7 @@ class TenantParser(object):
'secret', 'project-template', 'nodeset',
'queue'])
+ futures = []
for source_name, conf_source in conf_tenant.get('source', {}).items():
source = self.connections.getSource(source_name)
@@ -1807,7 +1905,8 @@ class TenantParser(object):
# tpcs = TenantProjectConfigs
tpcs = self._getProjects(source, conf_repo, current_include)
for tpc in tpcs:
- self._loadProjectKeys(source_name, tpc.project)
+ futures.append(executor.submit(
+ self._loadProjectKeys, source_name, tpc.project))
config_projects.append(tpc)
current_include = frozenset(default_include - set(['pipeline']))
@@ -1815,13 +1914,16 @@ class TenantParser(object):
tpcs = self._getProjects(source, conf_repo,
current_include)
for tpc in tpcs:
- self._loadProjectKeys(source_name, tpc.project)
+ futures.append(executor.submit(
+ self._loadProjectKeys, source_name, tpc.project))
untrusted_projects.append(tpc)
+ for f in futures:
+ f.result()
return config_projects, untrusted_projects
def _cacheTenantYAML(self, abide, tenant, loading_errors, min_ltimes,
- ignore_cat_exception=True):
+ executor, ignore_cat_exception=True):
# min_ltimes can be the following: None (that means that we
# should not use the file cache at all) or a nested dict of
# project and branch to ltime. A value of None usually means
@@ -1885,6 +1987,7 @@ class TenantParser(object):
jobs = []
+ futures = []
for project in itertools.chain(
tenant.config_projects, tenant.untrusted_projects):
tpc = tenant.project_configs[project.canonical_name]
@@ -1898,62 +2001,13 @@ class TenantParser(object):
# If all config classes are excluded then do not
# request any getFiles jobs.
continue
+ futures.append(executor.submit(self._cacheTenantYAMLBranch,
+ abide, tenant, loading_errors,
+ min_ltimes, tpc, project,
+ branch, jobs))
+ for future in futures:
+ future.result()
- source_context = model.SourceContext(
- project.canonical_name, project.name,
- project.connection_name, branch, '', False)
- if min_ltimes is not None:
- files_cache = self.unparsed_config_cache.getFilesCache(
- project.canonical_name, branch)
- branch_cache = abide.getUnparsedBranchCache(
- project.canonical_name, branch)
- pb_ltime = min_ltimes[project.canonical_name][branch]
-
- # If our unparsed branch cache is valid for the
- # time, then we don't need to do anything else.
- if branch_cache.isValidFor(tpc, pb_ltime):
- min_ltimes[project.canonical_name][branch] =\
- branch_cache.ltime
- continue
-
- with self.unparsed_config_cache.readLock(
- project.canonical_name):
- if files_cache.isValidFor(tpc, pb_ltime):
- self.log.debug(
- "Using files from cache for project "
- "%s @%s: %s",
- project.canonical_name, branch,
- list(files_cache.keys()))
- self._updateUnparsedBranchCache(
- abide, tenant, source_context, files_cache,
- loading_errors, files_cache.ltime,
- min_ltimes)
- continue
-
- extra_config_files = abide.getExtraConfigFiles(project.name)
- extra_config_dirs = abide.getExtraConfigDirs(project.name)
- if not self.merger:
- with project_configuration_exceptions(source_context,
- loading_errors):
- raise Exception(
- "Configuration files missing from cache. "
- "Check Zuul scheduler logs for more information.")
- continue
- ltime = self.zk_client.getCurrentLtime()
- job = self.merger.getFiles(
- project.source.connection.connection_name,
- project.name, branch,
- files=(['zuul.yaml', '.zuul.yaml'] +
- list(extra_config_files)),
- dirs=['zuul.d', '.zuul.d'] + list(extra_config_dirs))
- self.log.debug("Submitting cat job %s for %s %s %s" % (
- job, project.source.connection.connection_name,
- project.name, branch))
- job.extra_config_files = extra_config_files
- job.extra_config_dirs = extra_config_dirs
- job.ltime = ltime
- job.source_context = source_context
- jobs.append(job)
try:
self._processCatJobs(abide, tenant, loading_errors, jobs,
min_ltimes)
@@ -1970,7 +2024,76 @@ class TenantParser(object):
if not ignore_cat_exception:
raise
+ def _cacheTenantYAMLBranch(self, abide, tenant, loading_errors, min_ltimes,
+ tpc, project, branch, jobs):
+ # This is the middle section of _cacheTenantYAML, called for
+ # each project-branch. It's a separate method so we can
+ # execute it in parallel. The "jobs" argument is mutated and
+ # accumulates a list of all merger jobs submitted.
+ source_context = model.SourceContext(
+ project.canonical_name, project.name,
+ project.connection_name, branch, '', False)
+ if min_ltimes is not None:
+ files_cache = self.unparsed_config_cache.getFilesCache(
+ project.canonical_name, branch)
+ branch_cache = abide.getUnparsedBranchCache(
+ project.canonical_name, branch)
+ try:
+ pb_ltime = min_ltimes[project.canonical_name][branch]
+ except KeyError:
+ self.log.exception(
+ "Min. ltime missing for project/branch")
+ pb_ltime = -1
+
+ # If our unparsed branch cache is valid for the
+ # time, then we don't need to do anything else.
+ if branch_cache.isValidFor(tpc, pb_ltime):
+ min_ltimes[project.canonical_name][branch] =\
+ branch_cache.ltime
+ return
+
+ with self.unparsed_config_cache.readLock(
+ project.canonical_name):
+ if files_cache.isValidFor(tpc, pb_ltime):
+ self.log.debug(
+ "Using files from cache for project "
+ "%s @%s: %s",
+ project.canonical_name, branch,
+ list(files_cache.keys()))
+ self._updateUnparsedBranchCache(
+ abide, tenant, source_context, files_cache,
+ loading_errors, files_cache.ltime,
+ min_ltimes)
+ return
+
+ extra_config_files = abide.getExtraConfigFiles(project.name)
+ extra_config_dirs = abide.getExtraConfigDirs(project.name)
+ if not self.merger:
+ with project_configuration_exceptions(source_context,
+ loading_errors):
+ raise Exception(
+ "Configuration files missing from cache. "
+ "Check Zuul scheduler logs for more information.")
+ return
+ ltime = self.zk_client.getCurrentLtime()
+ job = self.merger.getFiles(
+ project.source.connection.connection_name,
+ project.name, branch,
+ files=(['zuul.yaml', '.zuul.yaml'] +
+ list(extra_config_files)),
+ dirs=['zuul.d', '.zuul.d'] + list(extra_config_dirs))
+ self.log.debug("Submitting cat job %s for %s %s %s" % (
+ job, project.source.connection.connection_name,
+ project.name, branch))
+ job.extra_config_files = extra_config_files
+ job.extra_config_dirs = extra_config_dirs
+ job.ltime = ltime
+ job.source_context = source_context
+ jobs.append(job)
+
def _processCatJobs(self, abide, tenant, loading_errors, jobs, min_ltimes):
+ # Called at the end of _cacheTenantYAML after all cat jobs
+ # have been submitted
for job in jobs:
self.log.debug("Waiting for cat job %s" % (job,))
res = job.wait(self.merger.git_timeout)
@@ -2380,6 +2503,7 @@ class ConfigLoader(object):
connections, zk_client, scheduler, merger, keystorage,
zuul_globals, statsd)
self.admin_rule_parser = AuthorizationRuleParser()
+ self.global_semaphore_parser = GlobalSemaphoreParser()
def expandConfigPath(self, config_path):
if config_path:
@@ -2436,6 +2560,12 @@ class ConfigLoader(object):
admin_rule = self.admin_rule_parser.fromYaml(conf_admin_rule)
abide.admin_rules[admin_rule.name] = admin_rule
+ def loadSemaphores(self, abide, unparsed_abide):
+ abide.semaphores.clear()
+ for conf_semaphore in unparsed_abide.semaphores:
+ semaphore = self.global_semaphore_parser.fromYaml(conf_semaphore)
+ abide.semaphores[semaphore.name] = semaphore
+
def loadTPCs(self, abide, unparsed_abide, tenants=None):
if tenants:
tenants_to_load = {t: unparsed_abide.tenants[t] for t in tenants
@@ -2445,15 +2575,17 @@ class ConfigLoader(object):
# Pre-load TenantProjectConfigs so we can get and cache all of a
# project's config files (incl. tenant specific extra config) at once.
- for tenant_name, unparsed_config in tenants_to_load.items():
- config_tpcs, untrusted_tpcs = (
- self.tenant_parser.loadTenantProjects(unparsed_config)
- )
- abide.clearTPCs(tenant_name)
- for tpc in config_tpcs:
- abide.addConfigTPC(tenant_name, tpc)
- for tpc in untrusted_tpcs:
- abide.addUntrustedTPC(tenant_name, tpc)
+ with ThreadPoolExecutor(max_workers=4) as executor:
+ for tenant_name, unparsed_config in tenants_to_load.items():
+ config_tpcs, untrusted_tpcs = (
+ self.tenant_parser.loadTenantProjects(unparsed_config,
+ executor)
+ )
+ abide.clearTPCs(tenant_name)
+ for tpc in config_tpcs:
+ abide.addConfigTPC(tenant_name, tpc)
+ for tpc in untrusted_tpcs:
+ abide.addUntrustedTPC(tenant_name, tpc)
def loadTenant(self, abide, tenant_name, ansible_manager, unparsed_abide,
min_ltimes=None, layout_uuid=None,
@@ -2532,9 +2664,11 @@ class ConfigLoader(object):
return None
unparsed_config = unparsed_abide.tenants[tenant_name]
- new_tenant = self.tenant_parser.fromYaml(
- abide, unparsed_config, ansible_manager, min_ltimes, layout_uuid,
- branch_cache_min_ltimes, ignore_cat_exception)
+ with ThreadPoolExecutor(max_workers=4) as executor:
+ new_tenant = self.tenant_parser.fromYaml(
+ abide, unparsed_config, ansible_manager, executor,
+ min_ltimes, layout_uuid, branch_cache_min_ltimes,
+ ignore_cat_exception)
# Copy tenants dictionary to not break concurrent iterations.
tenants = abide.tenants.copy()
tenants[tenant_name] = new_tenant
@@ -2558,7 +2692,8 @@ class ConfigLoader(object):
else:
# Use the cached branch list; since this is a dynamic
# reconfiguration there should not be any branch changes.
- branches = tenant.getProjectBranches(project.canonical_name)
+ branches = tenant.getProjectBranches(project.canonical_name,
+ include_always_dynamic=True)
for branch in branches:
fns1 = []
diff --git a/zuul/connection/__init__.py b/zuul/connection/__init__.py
index 03562102f..fb43fad4d 100644
--- a/zuul/connection/__init__.py
+++ b/zuul/connection/__init__.py
@@ -21,6 +21,10 @@ from zuul.lib.logutil import get_annotated_logger
from zuul.model import Project
+class ReadOnlyBranchCacheError(RuntimeError):
+ pass
+
+
class BaseConnection(object, metaclass=abc.ABCMeta):
"""Base class for connections.
@@ -241,15 +245,35 @@ class ZKBranchCacheMixin:
# Handle the case where tenant validation doesn't use the cache
branches = None
- if branches is not None:
+ if branches:
return sorted(branches)
if self.read_only:
+ if branches is None:
+ # A scheduler hasn't attempted to fetch them yet
+ raise ReadOnlyBranchCacheError(
+ "Will not fetch project branches as read-only is set")
+ # A scheduler has previously attempted a fetch, but got
+ # the empty list due to an error; we can't retry since
+ # we're read-only
raise RuntimeError(
"Will not fetch project branches as read-only is set")
# We need to perform a query
- branches = self._fetchProjectBranches(project, exclude_unprotected)
+ try:
+ branches = self._fetchProjectBranches(project, exclude_unprotected)
+ except Exception:
+ # We weren't able to get the branches. We need to tell
+ # future schedulers to try again but tell zuul-web that we
+ # tried and failed. Set the branches to the empty list to
+ # indicate that we have performed a fetch and retrieved no
+ # data. Any time we encounter the empty list in the
+ # cache, we will try again (since it is not reasonable to
+ # have a project with no branches).
+ if self._branch_cache:
+ self._branch_cache.setProjectBranches(
+ project.name, exclude_unprotected, [])
+ raise
self.log.info("Got branches for %s" % project.name)
if self._branch_cache:
@@ -315,14 +339,14 @@ class ZKBranchCacheMixin:
# again.
event.branch_protected = True
- def clearBranchCache(self):
+ def clearBranchCache(self, projects=None):
"""Clear the branch cache
In case the branch cache gets out of sync with the source,
this method can be called to clear it and force querying the
source the next time the cache is used.
"""
- self._branch_cache.clear()
+ self._branch_cache.clear(projects)
class ZKChangeCacheMixin:
diff --git a/zuul/driver/elasticsearch/reporter.py b/zuul/driver/elasticsearch/reporter.py
index e35bbcd48..7802cb609 100644
--- a/zuul/driver/elasticsearch/reporter.py
+++ b/zuul/driver/elasticsearch/reporter.py
@@ -30,8 +30,10 @@ class ElasticsearchReporter(BaseReporter):
self.index_vars = self.config.get('index-vars')
self.index_returned_vars = self.config.get('index-returned-vars')
- def report(self, item):
+ def report(self, item, phase1=True, phase2=True):
"""Create an entry into a database."""
+ if not phase1:
+ return
docs = []
index = '%s.%s-%s' % (self.index, item.pipeline.tenant.name,
time.strftime("%Y.%m.%d"))
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index 005f62b9a..6aea4388b 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -197,7 +197,13 @@ class GerritEventConnector(threading.Thread):
time.sleep(1)
def _run(self):
+ self.log.info("Won connection event queue election for %s",
+ self.connection.connection_name)
while not self._stopped and self.event_queue.election.is_still_valid():
+ qlen = len(self.event_queue)
+ if qlen:
+ self.log.debug("Connection event queue length for %s: %s",
+ self.connection.connection_name, qlen)
for event in self.event_queue:
try:
self._handleEvent(event)
@@ -207,27 +213,35 @@ class GerritEventConnector(threading.Thread):
return
self._connector_wake_event.wait(10)
self._connector_wake_event.clear()
+ self.log.info("Terminating connection event queue processing for %s",
+ self.connection.connection_name)
def _handleEvent(self, connection_event):
timestamp = connection_event["timestamp"]
data = connection_event["payload"]
+ if "zuul_event_id" in connection_event:
+ zuul_event_id = connection_event["zuul_event_id"]
+ else:
+ # TODO: This is for backwards compat; Remove after 7.0.0
+ zuul_event_id = str(uuid4().hex)
+ log = get_annotated_logger(self.log, zuul_event_id)
+ now = time.time()
+ delay = max((timestamp + self.delay) - now, 0.0)
# Gerrit can produce inconsistent data immediately after an
# event, So ensure that we do not deliver the event to Zuul
# until at least a certain amount of time has passed. Note
# that if we receive several events in succession, we will
# only need to delay for the first event. In essence, Zuul
# should always be a constant number of seconds behind Gerrit.
- now = time.time()
- time.sleep(max((timestamp + self.delay) - now, 0.0))
+
+ log.debug("Handling event received %ss ago, delaying %ss",
+ now - timestamp, delay)
+ time.sleep(delay)
event = GerritTriggerEvent()
event.timestamp = timestamp
event.connection_name = self.connection.connection_name
-
- # Gerrit events don't have an event id that could be used to globally
- # identify this event in the system so we have to generate one.
- event.zuul_event_id = str(uuid4().hex)
- log = get_annotated_logger(self.log, event)
+ event.zuul_event_id = zuul_event_id
event.type = data.get('type')
event.uuid = data.get('uuid')
@@ -578,6 +592,7 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
self.user = self.connection_config.get('user')
self.server = self.connection_config.get('server')
+ self.ssh_server = self.connection_config.get('ssh_server', self.server)
self.canonical_hostname = self.connection_config.get(
'canonical_hostname', self.server)
self.port = int(self.connection_config.get('port', 29418))
@@ -680,6 +695,7 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
"baseurl": self.baseurl,
"canonical_hostname": self.canonical_hostname,
"server": self.server,
+ "ssh_server": self.ssh_server,
"port": self.port,
})
return d
@@ -1196,23 +1212,27 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
if data.get('type') in GerritEventConnector.IGNORED_EVENTS:
return
+ # Gerrit events don't have an event id that could be used to globally
+ # identify this event in the system so we have to generate one.
event = {
"timestamp": time.time(),
+ "zuul_event_id": str(uuid4().hex),
"payload": data
}
self.event_queue.put(event)
def review(self, item, message, submit, labels, checks_api,
- file_comments, zuul_event_id=None):
+ file_comments, phase1, phase2, zuul_event_id=None):
if self.session:
meth = self.review_http
else:
meth = self.review_ssh
return meth(item, message, submit, labels, checks_api,
- file_comments, zuul_event_id=zuul_event_id)
+ file_comments, phase1, phase2,
+ zuul_event_id=zuul_event_id)
def review_ssh(self, item, message, submit, labels, checks_api,
- file_comments, zuul_event_id=None):
+ file_comments, phase1, phase2, zuul_event_id=None):
log = get_annotated_logger(self.log, zuul_event_id)
if checks_api:
log.error("Zuul is configured to report to the checks API, "
@@ -1221,23 +1241,24 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
change = item.change
project = change.project.name
cmd = 'gerrit review --project %s' % project
- if message:
- b_len = len(message.encode('utf-8'))
- if b_len >= GERRIT_HUMAN_MESSAGE_LIMIT:
- log.info("Message truncated %d > %d" %
- (b_len, GERRIT_HUMAN_MESSAGE_LIMIT))
- message = ("%s... (truncated)" %
- message[:GERRIT_HUMAN_MESSAGE_LIMIT - 20])
- cmd += ' --message %s' % shlex.quote(message)
- if submit:
+ if phase1:
+ if message:
+ b_len = len(message.encode('utf-8'))
+ if b_len >= GERRIT_HUMAN_MESSAGE_LIMIT:
+ log.info("Message truncated %d > %d" %
+ (b_len, GERRIT_HUMAN_MESSAGE_LIMIT))
+ message = ("%s... (truncated)" %
+ message[:GERRIT_HUMAN_MESSAGE_LIMIT - 20])
+ cmd += ' --message %s' % shlex.quote(message)
+ for key, val in labels.items():
+ if val is True:
+ cmd += ' --%s' % key
+ else:
+ cmd += ' --label %s=%s' % (key, val)
+ if self.version >= (2, 13, 0):
+ cmd += ' --tag autogenerated:zuul:%s' % (item.pipeline.name)
+ if phase2 and submit:
cmd += ' --submit'
- for key, val in labels.items():
- if val is True:
- cmd += ' --%s' % key
- else:
- cmd += ' --label %s=%s' % (key, val)
- if self.version >= (2, 13, 0):
- cmd += ' --tag autogenerated:zuul:%s' % (item.pipeline.name)
changeid = '%s,%s' % (change.number, change.patchset)
cmd += ' %s' % changeid
out, err = self._ssh(cmd, zuul_event_id=zuul_event_id)
@@ -1290,8 +1311,13 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
time.sleep(x * 10)
def review_http(self, item, message, submit, labels,
- checks_api, file_comments, zuul_event_id=None):
+ checks_api, file_comments, phase1, phase2,
+ zuul_event_id=None):
change = item.change
+ changeid = "%s~%s~%s" % (
+ urllib.parse.quote(str(change.project), safe=''),
+ urllib.parse.quote(str(change.branch), safe=''),
+ change.id)
log = get_annotated_logger(self.log, zuul_event_id)
b_len = len(message.encode('utf-8'))
if b_len >= GERRIT_HUMAN_MESSAGE_LIMIT:
@@ -1299,53 +1325,51 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
(b_len, GERRIT_HUMAN_MESSAGE_LIMIT))
message = ("%s... (truncated)" %
message[:GERRIT_HUMAN_MESSAGE_LIMIT - 20])
- data = dict(message=message,
- strict_labels=False)
- if change.is_current_patchset:
- if labels:
- data['labels'] = labels
- if file_comments:
- if self.version >= (2, 15, 0):
- file_comments = copy.deepcopy(file_comments)
- url = item.formatStatusUrl()
- for comments in itertools.chain(file_comments.values()):
- for comment in comments:
- comment['robot_id'] = 'zuul'
- comment['robot_run_id'] = \
- item.current_build_set.uuid
- if url:
- comment['url'] = url
- data['robot_comments'] = file_comments
- else:
- data['comments'] = file_comments
- if self.version >= (2, 13, 0):
- data['tag'] = 'autogenerated:zuul:%s' % (item.pipeline.name)
- changeid = "%s~%s~%s" % (
- urllib.parse.quote(str(change.project), safe=''),
- urllib.parse.quote(str(change.branch), safe=''),
- change.id)
- if checks_api:
- self.report_checks(log, item, changeid, checks_api)
- if (message or data.get('labels') or data.get('comments')
- or data.get('robot_comments')):
- for x in range(1, 4):
- try:
- self.post('changes/%s/revisions/%s/review' %
- (changeid, change.commit),
- data)
- break
- except HTTPConflictException:
- log.exception("Conflict submitting data to gerrit.")
- break
- except HTTPBadRequestException:
- log.exception(
- "Bad request submitting check data to gerrit.")
- break
- except Exception:
- log.exception(
- "Error submitting data to gerrit, attempt %s", x)
- time.sleep(x * 10)
- if change.is_current_patchset and submit:
+ data = dict(strict_labels=False)
+ if phase1:
+ data['message'] = message
+ if change.is_current_patchset:
+ if labels:
+ data['labels'] = labels
+ if file_comments:
+ if self.version >= (2, 15, 0):
+ file_comments = copy.deepcopy(file_comments)
+ url = item.formatStatusUrl()
+ for comments in itertools.chain(
+ file_comments.values()):
+ for comment in comments:
+ comment['robot_id'] = 'zuul'
+ comment['robot_run_id'] = \
+ item.current_build_set.uuid
+ if url:
+ comment['url'] = url
+ data['robot_comments'] = file_comments
+ else:
+ data['comments'] = file_comments
+ if self.version >= (2, 13, 0):
+ data['tag'] = 'autogenerated:zuul:%s' % (item.pipeline.name)
+ if checks_api:
+ self.report_checks(log, item, changeid, checks_api)
+ if (message or data.get('labels') or data.get('comments')
+ or data.get('robot_comments')):
+ for x in range(1, 4):
+ try:
+ self.post('changes/%s/revisions/%s/review' %
+ (changeid, change.commit),
+ data)
+ break
+ except HTTPConflictException:
+ log.exception("Conflict submitting data to gerrit.")
+ break
+ except HTTPBadRequestException:
+ log.exception(
+ "Bad request submitting check data to gerrit.")
+ break
+ except Exception:
+ log.exception(
+ "Error submitting data to gerrit, attempt %s", x)
+ time.sleep(x * 10)
+ if phase2 and change.is_current_patchset and submit:
for x in range(1, 4):
try:
self.post('changes/%s/submit' % (changeid,), {})
@@ -1535,7 +1559,7 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
client.set_missing_host_key_policy(paramiko.WarningPolicy())
# SSH banner, handshake, and auth timeouts default to 15
# seconds, so we only set the socket timeout here.
- client.connect(self.server,
+ client.connect(self.ssh_server,
username=self.user,
port=self.port,
key_filename=self.keyfile,
@@ -1626,7 +1650,7 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
baseurl = urllib.parse.urlunparse(baseurl)
url = ('%s/a/%s' % (baseurl, project.name))
else:
- url = 'ssh://%s@%s:%s/%s' % (self.user, self.server, self.port,
+ url = 'ssh://%s@%s:%s/%s' % (self.user, self.ssh_server, self.port,
project.name)
return url
@@ -1717,7 +1741,7 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
self.watcher_thread = GerritWatcher(
self,
self.user,
- self.server,
+ self.ssh_server,
self.port,
keyfile=self.keyfile,
keepalive=self.keepalive)
diff --git a/zuul/driver/gerrit/gerritreporter.py b/zuul/driver/gerrit/gerritreporter.py
index fbee0e0ec..984b8742a 100644
--- a/zuul/driver/gerrit/gerritreporter.py
+++ b/zuul/driver/gerrit/gerritreporter.py
@@ -35,7 +35,7 @@ class GerritReporter(BaseReporter):
self._checks_api = action.pop('checks-api', None)
self._labels = action
- def report(self, item):
+ def report(self, item, phase1=True, phase2=True):
"""Send a message to gerrit."""
log = get_annotated_logger(self.log, item.event)
@@ -70,7 +70,8 @@ class GerritReporter(BaseReporter):
return self.connection.review(item, message, self._submit,
self._labels, self._checks_api,
- comments, zuul_event_id=item.event)
+ comments, phase1, phase2,
+ zuul_event_id=item.event)
def getSubmitAllowNeeds(self):
"""Get a list of code review labels that are allowed to be
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index 91b107959..99631dda2 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -1267,6 +1267,21 @@ class GithubConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
self.server = self.connection_config.get('server', 'github.com')
self.canonical_hostname = self.connection_config.get(
'canonical_hostname', self.server)
+ self.repo_cache = self.connection_config.get('repo_cache')
+ if self.git_ssh_key and self.repo_cache:
+ self.log.warning("Both sshkey and repo_cache specified "
+ "but are incompatible; "
+ "repo_cache will be ignored")
+ self.repo_cache = None
+ if self.repo_cache:
+ rrt = self.connection_config.get('repo_retry_timeout')
+ if rrt:
+ self.repo_retry_timeout = int(rrt)
+ else:
+ self.repo_retry_timeout = None
+ else:
+ self.repo_retry_timeout = None
+
self.source = driver.getSource(self)
self._sha_pr_cache = GithubShaCache()
@@ -1288,6 +1303,7 @@ class GithubConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
"baseurl": self._github_client_manager.base_url,
"canonical_hostname": self.canonical_hostname,
"server": self.server,
+ "repo_cache": self.repo_cache,
})
return d
@@ -1667,6 +1683,11 @@ class GithubConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
if not self._github_client_manager.initialized:
self._github_client_manager.initialize()
+ if self.repo_cache:
+ server = self.repo_cache
+ else:
+ server = self.server
+
if self._github_client_manager.usesAppAuthentication:
# We may be in the context of a merger or executor here. The
# mergers and executors don't receive webhook events so they miss
@@ -1676,10 +1697,10 @@ class GithubConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
self._github_client_manager.get_installation_key(
project.name, reprime=True)
return 'https://x-access-token:%s@%s/%s' % (installation_key,
- self.server,
+ server,
project.name)
- return 'https://%s/%s' % (self.server, project.name)
+ return 'https://%s/%s' % (server, project.name)
def getGitwebUrl(self, project, sha=None, tag=None):
url = 'https://%s/%s' % (self.server, project)
diff --git a/zuul/driver/github/githubreporter.py b/zuul/driver/github/githubreporter.py
index c5297a23a..de62f2565 100644
--- a/zuul/driver/github/githubreporter.py
+++ b/zuul/driver/github/githubreporter.py
@@ -55,7 +55,7 @@ class GithubReporter(BaseReporter):
self._unlabels = [self._unlabels]
self.context = "{}/{}".format(pipeline.tenant.name, pipeline.name)
- def report(self, item):
+ def report(self, item, phase1=True, phase2=True):
"""Report on an event."""
# If the source is not GithubSource we cannot report anything here.
if not isinstance(item.change.project.source, GithubSource):
@@ -69,7 +69,7 @@ class GithubReporter(BaseReporter):
# order is important for github branch protection.
# A status should be set before a merge attempt
- if self._commit_status is not None:
+ if phase1 and self._commit_status is not None:
if (hasattr(item.change, 'patchset') and
item.change.patchset is not None):
self.setCommitStatus(item)
@@ -80,22 +80,24 @@ class GithubReporter(BaseReporter):
# If the change is not a pull request (e.g. a push) skip them.
if hasattr(item.change, 'number'):
errors_received = False
- if self._labels or self._unlabels:
- self.setLabels(item)
- if self._review:
- self.addReview(item)
- if self._check:
- check_errors = self.updateCheck(item)
- # TODO (felix): We could use this mechanism to also report back
- # errors from label and review actions
- if check_errors:
- item.current_build_set.warning_messages.extend(
- check_errors
- )
- errors_received = True
- if self._create_comment or errors_received:
- self.addPullComment(item)
- if (self._merge):
+ if phase1:
+ if self._labels or self._unlabels:
+ self.setLabels(item)
+ if self._review:
+ self.addReview(item)
+ if self._check:
+ check_errors = self.updateCheck(item)
+ # TODO (felix): We could use this mechanism to
+ # also report back errors from label and review
+ # actions
+ if check_errors:
+ item.current_build_set.warning_messages.extend(
+ check_errors
+ )
+ errors_received = True
+ if self._create_comment or errors_received:
+ self.addPullComment(item)
+ if phase2 and self._merge:
try:
self.mergePull(item)
except Exception as e:
diff --git a/zuul/driver/github/githubsource.py b/zuul/driver/github/githubsource.py
index ebf3abae0..7d4815237 100644
--- a/zuul/driver/github/githubsource.py
+++ b/zuul/driver/github/githubsource.py
@@ -150,6 +150,10 @@ class GithubSource(BaseSource):
"""Get the git url for a project."""
return self.connection.getGitUrl(project)
+ def getRetryTimeout(self, project):
+ """Get the retry timeout for a project."""
+ return self.connection.repo_retry_timeout
+
def getGitwebUrl(self, project, sha=None):
"""Get the git-web url for a project."""
return self.connection.getGitwebUrl(project, sha)
diff --git a/zuul/driver/gitlab/gitlabconnection.py b/zuul/driver/gitlab/gitlabconnection.py
index 54c3d66b8..71c5488a8 100644
--- a/zuul/driver/gitlab/gitlabconnection.py
+++ b/zuul/driver/gitlab/gitlabconnection.py
@@ -431,7 +431,8 @@ class GitlabAPIClient():
params = {}
if method == "squash":
params['squash'] = True
- resp = self.put(self.baseurl + path, zuul_event_id=zuul_event_id)
+ resp = self.put(
+ self.baseurl + path, params, zuul_event_id=zuul_event_id)
try:
self._manage_error(*resp, zuul_event_id=zuul_event_id)
if resp[0]['state'] != 'merged':
diff --git a/zuul/driver/gitlab/gitlabreporter.py b/zuul/driver/gitlab/gitlabreporter.py
index f364521f5..819c89c47 100644
--- a/zuul/driver/gitlab/gitlabreporter.py
+++ b/zuul/driver/gitlab/gitlabreporter.py
@@ -50,7 +50,7 @@ class GitlabReporter(BaseReporter):
if not isinstance(self._unlabels, list):
self._unlabels = [self._unlabels]
- def report(self, item):
+ def report(self, item, phase1=True, phase2=True):
"""Report on an event."""
if not isinstance(item.change.project.source, GitlabSource):
return
@@ -60,13 +60,14 @@ class GitlabReporter(BaseReporter):
return
if hasattr(item.change, 'number'):
- if self._create_comment:
- self.addMRComment(item)
- if self._approval is not None:
- self.setApproval(item)
- if self._labels or self._unlabels:
- self.setLabels(item)
- if self._merge:
+ if phase1:
+ if self._create_comment:
+ self.addMRComment(item)
+ if self._approval is not None:
+ self.setApproval(item)
+ if self._labels or self._unlabels:
+ self.setLabels(item)
+ if phase2 and self._merge:
self.mergeMR(item)
if not item.change.is_merged:
msg = self._formatItemReportMergeConflict(item)
diff --git a/zuul/driver/mqtt/mqttreporter.py b/zuul/driver/mqtt/mqttreporter.py
index a53389337..4090bb082 100644
--- a/zuul/driver/mqtt/mqttreporter.py
+++ b/zuul/driver/mqtt/mqttreporter.py
@@ -27,7 +27,9 @@ class MQTTReporter(BaseReporter):
name = 'mqtt'
log = logging.getLogger("zuul.MQTTReporter")
- def report(self, item):
+ def report(self, item, phase1=True, phase2=True):
+ if not phase1:
+ return
log = get_annotated_logger(self.log, item.event)
log.debug("Report change %s, params %s", item.change, self.config)
message = {
diff --git a/zuul/driver/pagure/pagurereporter.py b/zuul/driver/pagure/pagurereporter.py
index 918a31b6b..0bfdbc9b8 100644
--- a/zuul/driver/pagure/pagurereporter.py
+++ b/zuul/driver/pagure/pagurereporter.py
@@ -34,7 +34,7 @@ class PagureReporter(BaseReporter):
self._merge = self.config.get('merge', False)
self.context = "{}/{}".format(pipeline.tenant.name, pipeline.name)
- def report(self, item):
+ def report(self, item, phase1=True, phase2=True):
"""Report on an event."""
# If the source is not PagureSource we cannot report anything here.
@@ -47,17 +47,18 @@ class PagureReporter(BaseReporter):
self.connection.canonical_hostname:
return
- if self._commit_status is not None:
- if (hasattr(item.change, 'patchset') and
- item.change.patchset is not None):
- self.setCommitStatus(item)
- elif (hasattr(item.change, 'newrev') and
- item.change.newrev is not None):
- self.setCommitStatus(item)
- if hasattr(item.change, 'number'):
- if self._create_comment:
- self.addPullComment(item)
- if self._merge:
+ if phase1:
+ if self._commit_status is not None:
+ if (hasattr(item.change, 'patchset') and
+ item.change.patchset is not None):
+ self.setCommitStatus(item)
+ elif (hasattr(item.change, 'newrev') and
+ item.change.newrev is not None):
+ self.setCommitStatus(item)
+ if hasattr(item.change, 'number'):
+ if self._create_comment:
+ self.addPullComment(item)
+ if phase2 and self._merge:
self.mergePull(item)
if not item.change.is_merged:
msg = self._formatItemReportMergeConflict(item)
diff --git a/zuul/driver/smtp/smtpreporter.py b/zuul/driver/smtp/smtpreporter.py
index 58bd25cef..4815026ab 100644
--- a/zuul/driver/smtp/smtpreporter.py
+++ b/zuul/driver/smtp/smtpreporter.py
@@ -25,8 +25,10 @@ class SMTPReporter(BaseReporter):
name = 'smtp'
log = logging.getLogger("zuul.SMTPReporter")
- def report(self, item):
+ def report(self, item, phase1=True, phase2=True):
"""Send the compiled report message via smtp."""
+ if not phase1:
+ return
log = get_annotated_logger(self.log, item.event)
message = self._formatItemReport(item)
diff --git a/zuul/driver/sql/alembic/versions/c7467b642498_buildset_updated.py b/zuul/driver/sql/alembic/versions/c7467b642498_buildset_updated.py
new file mode 100644
index 000000000..abfba7247
--- /dev/null
+++ b/zuul/driver/sql/alembic/versions/c7467b642498_buildset_updated.py
@@ -0,0 +1,47 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""buildset_updated
+
+Revision ID: c7467b642498
+Revises: 4647def24b32
+Create Date: 2022-05-28 16:21:50.035877
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'c7467b642498'
+down_revision = '4647def24b32'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(table_prefix=''):
+ op.add_column(
+ table_prefix + "zuul_buildset", sa.Column('updated', sa.DateTime))
+
+ connection = op.get_bind()
+ connection.execute(
+ """
+ UPDATE {buildset_table}
+ SET updated=greatest(
+ coalesce(first_build_start_time, '1970-01-01 00:00:00'),
+ coalesce(last_build_end_time, '1970-01-01 00:00:00'),
+ coalesce(event_timestamp, '1970-01-01 00:00:00'))
+ """.format(buildset_table=table_prefix + "zuul_buildset"))
+
+
+def downgrade():
+ raise Exception("Downgrades not supported")
diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py
index 8dd5f9d44..1af50b2f7 100644
--- a/zuul/driver/sql/sqlconnection.py
+++ b/zuul/driver/sql/sqlconnection.py
@@ -173,6 +173,7 @@ class DatabaseSession(object):
def getBuildsets(self, tenant=None, project=None, pipeline=None,
change=None, branch=None, patchset=None, ref=None,
newrev=None, uuid=None, result=None, complete=None,
+ updated_max=None,
limit=50, offset=0, idx_min=None, idx_max=None):
buildset_table = self.connection.zuul_buildset_table
@@ -202,6 +203,9 @@ class DatabaseSession(object):
elif complete is False:
q = q.filter(buildset_table.c.result == None) # noqa
+ if updated_max:
+ q = q.filter(buildset_table.c.updated < updated_max)
+
q = q.order_by(buildset_table.c.id.desc()).\
limit(limit).\
offset(offset)
@@ -232,6 +236,13 @@ class DatabaseSession(object):
except sqlalchemy.orm.exc.MultipleResultsFound:
raise Exception("Multiple buildset found with uuid %s", uuid)
+ def deleteBuildsets(self, cutoff):
+ """Delete buildsets before the cutoff"""
+
+ # delete buildsets updated before the cutoff
+ for buildset in self.getBuildsets(updated_max=cutoff):
+ self.session().delete(buildset)
+
class SQLConnection(BaseConnection):
driver_name = 'sql'
@@ -349,6 +360,7 @@ class SQLConnection(BaseConnection):
event_timestamp = sa.Column(sa.DateTime, nullable=True)
first_build_start_time = sa.Column(sa.DateTime, nullable=True)
last_build_end_time = sa.Column(sa.DateTime, nullable=True)
+ updated = sa.Column(sa.DateTime, nullable=True)
sa.Index(self.table_prefix + 'project_pipeline_idx',
project, pipeline)
@@ -472,3 +484,8 @@ class SQLConnection(BaseConnection):
"""Return a BuildSet objects"""
with self.getSession() as db:
return db.getBuildset(*args, **kw)
+
+ def deleteBuildsets(self, *args, **kw):
+ """Delete buildsets"""
+ with self.getSession() as db:
+ return db.deleteBuildsets(*args, **kw)
diff --git a/zuul/driver/sql/sqlreporter.py b/zuul/driver/sql/sqlreporter.py
index edce622f8..cf75a7495 100644
--- a/zuul/driver/sql/sqlreporter.py
+++ b/zuul/driver/sql/sqlreporter.py
@@ -68,6 +68,7 @@ class SQLReporter(BaseReporter):
ref_url=item.change.url,
event_id=event_id,
event_timestamp=event_timestamp,
+ updated=datetime.datetime.utcnow(),
)
return db_buildset
@@ -111,6 +112,7 @@ class SQLReporter(BaseReporter):
and build.end_time > end_time):
end_time = build.end_time
db_buildset.last_build_end_time = end_time
+ db_buildset.updated = datetime.datetime.utcnow()
return
except sqlalchemy.exc.DBAPIError:
if retry_count < self.retry_count - 1:
diff --git a/zuul/lib/ansible-config.conf b/zuul/lib/ansible-config.conf
index b942eb62f..5bc3bd325 100644
--- a/zuul/lib/ansible-config.conf
+++ b/zuul/lib/ansible-config.conf
@@ -1,7 +1,8 @@
# This file describes the currently supported ansible versions
[common]
default_version = 2.9
-requirements = openstacksdk openshift jmespath google-cloud-storage pywinrm boto3 azure-storage-blob ibm-cos-sdk netaddr
+# OpenStackSDK 0.99.0 coincides with CORS header problems in some providers
+requirements = openstacksdk<0.99 openshift jmespath google-cloud-storage pywinrm boto3 azure-storage-blob ibm-cos-sdk netaddr passlib
[2.8]
# Ansible 2.8.16 breaks the k8s connection plugin
diff --git a/zuul/lib/encryption.py b/zuul/lib/encryption.py
index 5720b9cee..79e92e366 100644
--- a/zuul/lib/encryption.py
+++ b/zuul/lib/encryption.py
@@ -20,6 +20,18 @@ from cryptography.hazmat.primitives import hashes
from functools import lru_cache
+# OpenSSL 3.0.0 performs key validation in a very slow manner. Since
+# our keys are internally generated and securely stored, we can skip
+# validation. See https://github.com/pyca/cryptography/issues/7236
+backend = default_backend()
+if hasattr(backend, '_rsa_skip_check_key'):
+ backend._rsa_skip_check_key = True
+else:
+ import logging
+ logging.warning("Cryptography backend lacks _rsa_skip_check_key flag, "
+ "key loading may be slow")
+
+
# https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#generation
def generate_rsa_keypair():
"""Generate an RSA keypair.
@@ -30,7 +42,7 @@ def generate_rsa_keypair():
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=4096,
- backend=default_backend()
+ backend=backend,
)
public_key = private_key.public_key()
return (private_key, public_key)
@@ -98,7 +110,7 @@ def deserialize_rsa_keypair(data, password=None):
private_key = serialization.load_pem_private_key(
data,
password=password,
- backend=default_backend()
+ backend=backend,
)
public_key = private_key.public_key()
return (private_key, public_key)
diff --git a/zuul/lib/fingergw.py b/zuul/lib/fingergw.py
index 90eb032ea..ad945c1b7 100644
--- a/zuul/lib/fingergw.py
+++ b/zuul/lib/fingergw.py
@@ -217,6 +217,7 @@ class FingerGateway(object):
raise
def start(self):
+ self.log.info("Starting finger gateway")
kwargs = dict(
user=self.user,
pid_file=self.pid_file,
@@ -260,6 +261,7 @@ class FingerGateway(object):
self.log.info("Finger gateway is started")
def stop(self):
+ self.log.info("Stopping finger gateway")
self.component_info.state = self.component_info.STOPPED
if self.server:
@@ -283,7 +285,7 @@ class FingerGateway(object):
self.log.info("Finger gateway is stopped")
- def wait(self):
+ def join(self):
'''
Wait on the gateway to shutdown.
'''
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index e62a62297..642aededd 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -139,13 +139,13 @@ class PipelineManager(metaclass=ABCMeta):
for project_config in layout.getAllProjectConfigs(project_name):
project_pipeline_config = project_config.pipelines.get(
self.pipeline.name)
+ if not project_queue_name:
+ project_queue_name = project_config.queue_name
if project_pipeline_config is None:
continue
project_in_pipeline = True
if not pipeline_queue_name:
pipeline_queue_name = project_pipeline_config.queue_name
- if not project_queue_name:
- project_queue_name = project_config.queue_name
if not project_in_pipeline:
continue
@@ -222,13 +222,14 @@ class PipelineManager(metaclass=ABCMeta):
def resolveChangeKeys(self, change_keys):
resolved_changes = []
for key in change_keys:
- change = self._change_cache.get(key)
+ change = self._change_cache.get(key.reference)
if change is None:
source = self.sched.connections.getSource(key.connection_name)
change = source.getChange(key)
if change is None:
self.log.error("Unable to resolve change from key %s", key)
- if isinstance(change, model.Change):
+ if (isinstance(change, model.Change)
+ and change.commit_needs_changes is None):
self.updateCommitDependencies(change, None, event=None)
self._change_cache[change.cache_key] = change
resolved_changes.append(change)
@@ -341,18 +342,19 @@ class PipelineManager(metaclass=ABCMeta):
self.reportNormalBuildsetEnd(item.current_build_set, 'dequeue',
final=False)
- def sendReport(self, action_reporters, item, message=None):
+ def sendReport(self, action_reporters, item, phase1=True, phase2=True):
"""Sends the built message off to configured reporters.
- Takes the action_reporters, item, message and extra options and
- sends them to the pluggable reporters.
+ Takes the action_reporters and item and sends them to the
+ pluggable reporters.
+
"""
log = get_annotated_logger(self.log, item.event)
report_errors = []
if len(action_reporters) > 0:
for reporter in action_reporters:
try:
- ret = reporter.report(item)
+ ret = reporter.report(item, phase1=phase1, phase2=phase2)
if ret:
report_errors.append(ret)
except Exception as e:
@@ -851,8 +853,7 @@ class PipelineManager(metaclass=ABCMeta):
new_topic_needs_changes.append(dep.cache_key)
update_attrs['topic_needs_changes'] = new_topic_needs_changes
- if change.commit_needs_changes != new_commit_needs_changes:
- source.setChangeAttributes(change, **update_attrs)
+ source.setChangeAttributes(change, **update_attrs)
def provisionNodes(self, item):
log = item.annotateLogger(self.log)
@@ -951,7 +952,7 @@ class PipelineManager(metaclass=ABCMeta):
jobs_to_cancel = item.getJobs()
for job in jobs_to_cancel:
- self.sched.cancelJob(old_build_set, job)
+ self.sched.cancelJob(old_build_set, job, final=True)
# Don't reset builds for a failing bundle when it has already started
# reporting, to keep available build results. Those items will be
@@ -1448,6 +1449,7 @@ class PipelineManager(metaclass=ABCMeta):
if build_set.repo_state_state == build_set.PENDING:
return False
+ item.deduplicateJobs(log)
return True
def _processOneItem(self, item, nnfi):
@@ -1567,8 +1569,23 @@ class PipelineManager(metaclass=ABCMeta):
item.bundle.started_reporting = can_report
if can_report:
+ # If we're starting to report a successful bundle, enable
+ # two-phase reporting. Report the first phase for every item
+ # in the bundle, then the second.
+ phase1 = True
+ phase2 = True
+ if (self.changes_merge
+ and item.bundle
+ and (not item.cannotMergeBundle())
+ and (not item.isBundleFailing())):
+ for i in item.bundle.items:
+ if not i.reported:
+ self.log.debug("Report phase1 for bundle item %s", i)
+ self.reportItem(i, phase1=True, phase2=False)
+ phase1 = False
+
try:
- self.reportItem(item)
+ self.reportItem(item, phase1=phase1, phase2=phase2)
except exceptions.MergeFailure:
failing_reasons.append("it did not merge")
for item_behind in item.items_behind:
@@ -1576,7 +1593,7 @@ class PipelineManager(metaclass=ABCMeta):
"item ahead, %s, failed to merge" %
(item_behind.change, item))
self.cancelJobs(item_behind)
- # Only re-reported items in the cycle when we encounter a merge
+ # Only re-report items in the cycle when we encounter a merge
# failure for a successful bundle.
if (item.bundle and not (
item.isBundleFailing() or item.cannotMergeBundle())):
@@ -1746,25 +1763,46 @@ class PipelineManager(metaclass=ABCMeta):
build, item)
return
- item.setResult(build)
- log.debug("Item %s status is now:\n %s", item, item.formatStatus())
+ # If the build was for deduplicated jobs, apply the results to
+ # all the items that use this build.
+ build_in_items = [item]
+ if item.bundle:
+ for other_item in item.bundle.items:
+ if other_item not in build_in_items:
+ if other_item.current_build_set.getBuild(build.job.name):
+ build_in_items.append(other_item)
+ for item in build_in_items:
+ # We don't care about some actions below if this build
+ # isn't in the current buildset, so determine that before
+ # it is potentially removed with setResult.
+ if item.current_build_set.getBuild(build.job.name) is not build:
+ current = False
+ else:
+ current = True
+ item.setResult(build)
+ log.debug("Item %s status is now:\n %s", item, item.formatStatus())
- if build.retry:
- if build.build_set.getJobNodeSetInfo(build.job.name):
- build.build_set.removeJobNodeSetInfo(build.job.name)
+ if not current:
+ continue
+ build_set = item.current_build_set
- # in case this was a paused build we need to retry all child jobs
- self._resetDependentBuilds(build.build_set, build)
+ if build.retry:
+ if build_set.getJobNodeSetInfo(build.job.name):
+ build_set.removeJobNodeSetInfo(build.job.name)
- self._resumeBuilds(build.build_set)
+ # in case this was a paused build we need to retry all
+ # child jobs
+ self._resetDependentBuilds(build_set, build)
- if (item.current_build_set.fail_fast and
- build.failed and build.job.voting and not build.retry):
- # If fail-fast is set and the build is not successful
- # cancel all remaining jobs.
- log.debug("Build %s failed and fail-fast enabled, canceling "
- "running builds", build)
- self._cancelRunningBuilds(build.build_set)
+ self._resumeBuilds(build_set)
+
+ if (build_set.fail_fast and
+ build.failed and build.job.voting and not build.retry):
+ # If fail-fast is set and the build is not successful
+ # cancel all remaining jobs.
+ log.debug("Build %s failed and fail-fast enabled, canceling "
+ "running builds", build)
+ self._cancelRunningBuilds(build_set)
return True
@@ -1863,13 +1901,19 @@ class PipelineManager(metaclass=ABCMeta):
"with nodes %s",
request, request.job_name, build_set.item, request.nodes)
- def reportItem(self, item):
+ def reportItem(self, item, phase1=True, phase2=True):
log = get_annotated_logger(self.log, item.event)
action = None
- if not item.reported:
- action, reported = self._reportItem(item)
+
+ already_reported = item.reported
+ if phase2 and not phase1:
+ already_reported = False
+ if not already_reported:
+ action, reported = self._reportItem(item, phase1, phase2)
item.updateAttributes(self.current_context,
reported=reported)
+ if not phase2:
+ return
if self.changes_merge:
succeeded = item.didAllJobsSucceed() and not item.isBundleFailing()
merged = item.reported
@@ -1893,7 +1937,7 @@ class PipelineManager(metaclass=ABCMeta):
error_reason = "failed tests"
else:
error_reason = "failed to merge"
- log.info("Reported change %s did not merge because it %s,"
+ log.info("Reported change %s did not merge because it %s, "
"status: all-succeeded: %s, merged: %s",
item.change, error_reason, succeeded, merged)
if not succeeded:
@@ -1918,9 +1962,10 @@ class PipelineManager(metaclass=ABCMeta):
self.reportNormalBuildsetEnd(item.current_build_set,
action, final=True)
- def _reportItem(self, item):
+ def _reportItem(self, item, phase1, phase2):
log = get_annotated_logger(self.log, item.event)
- log.debug("Reporting change %s", item.change)
+ log.debug("Reporting phase1: %s phase2: %s change: %s",
+ phase1, phase2, item.change)
ret = True # Means error as returned by trigger.report
# In the case of failure, we may not have completed an initial
@@ -1968,7 +2013,7 @@ class PipelineManager(metaclass=ABCMeta):
actions = self.pipeline.failure_actions
item.setReportedResult('FAILURE')
elif item.wasDequeuedMissingRequirements():
- log.debug("Dequeued missing requirements")
+ log.debug("Dequeued missing merge requirements")
action = 'failure'
actions = self.pipeline.failure_actions
item.setReportedResult('FAILURE')
@@ -2016,7 +2061,7 @@ class PipelineManager(metaclass=ABCMeta):
self.current_context, disabled=True)
if actions:
log.info("Reporting item %s, actions: %s", item, actions)
- ret = self.sendReport(actions, item)
+ ret = self.sendReport(actions, item, phase1, phase2)
if ret:
log.error("Reporting item %s received: %s", item, ret)
return action, (not ret)
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index ce1c2b3e5..c3e20e21b 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -19,6 +19,7 @@ from typing import Optional
from urllib.parse import urlsplit, urlunsplit, urlparse
import hashlib
import logging
+import math
import os
import re
import shutil
@@ -74,7 +75,7 @@ class Repo(object):
def __init__(self, remote, local, email, username, speed_limit, speed_time,
sshkey=None, cache_path=None, logger=None, git_timeout=300,
- zuul_event_id=None):
+ zuul_event_id=None, retry_timeout=None):
if logger is None:
self.log = logging.getLogger("zuul.Repo")
else:
@@ -85,6 +86,9 @@ class Repo(object):
'GIT_HTTP_LOW_SPEED_TIME': speed_time,
}
self.git_timeout = git_timeout
+ if retry_timeout:
+ self.retry_attempts = math.ceil(
+ retry_timeout / self.retry_interval)
self.sshkey = sshkey
if sshkey:
self.env['GIT_SSH_COMMAND'] = 'ssh -i %s' % (sshkey,)
@@ -555,9 +559,15 @@ class Repo(object):
def cherryPick(self, ref, zuul_event_id=None):
log = get_annotated_logger(self.log, zuul_event_id)
repo = self.createRepoObject(zuul_event_id)
- log.debug("Cherry-picking %s", ref)
self.fetch(ref, zuul_event_id=zuul_event_id)
- repo.git.cherry_pick("FETCH_HEAD")
+ if len(repo.commit("FETCH_HEAD").parents) > 1:
+ args = ["-s", "resolve", "FETCH_HEAD"]
+ log.debug("Merging %s with args %s instead of cherry-picking",
+ ref, args)
+ repo.git.merge(*args)
+ else:
+ log.debug("Cherry-picking %s", ref)
+ repo.git.cherry_pick("FETCH_HEAD")
return repo.head.commit
def merge(self, ref, strategy=None, zuul_event_id=None):
@@ -859,7 +869,7 @@ class Merger(object):
f.write(self.scheme)
def _addProject(self, hostname, connection_name, project_name, url, sshkey,
- zuul_event_id, process_worker=None):
+ zuul_event_id, process_worker=None, retry_timeout=None):
repo = None
key = '/'.join([hostname, project_name])
try:
@@ -878,7 +888,7 @@ class Merger(object):
url, path, self.email, self.username, self.speed_limit,
self.speed_time, sshkey=sshkey, cache_path=cache_path,
logger=self.logger, git_timeout=self.git_timeout,
- zuul_event_id=zuul_event_id)
+ zuul_event_id=zuul_event_id, retry_timeout=retry_timeout)
self.repos[key] = repo
except Exception:
@@ -893,6 +903,7 @@ class Merger(object):
project = source.getProject(project_name)
hostname = project.canonical_hostname
url = source.getGitUrl(project)
+ retry_timeout = source.getRetryTimeout(project)
key = '/'.join([hostname, project_name])
if key in self.repos:
repo = self.repos[key]
@@ -906,7 +917,8 @@ class Merger(object):
(connection_name, project_name,))
return self._addProject(hostname, connection_name, project_name, url,
sshkey, zuul_event_id,
- process_worker=process_worker)
+ process_worker=process_worker,
+ retry_timeout=retry_timeout)
def updateRepo(self, connection_name, project_name, repo_state=None,
zuul_event_id=None, build=None, process_worker=None):
diff --git a/zuul/model.py b/zuul/model.py
index 5eb8e59cb..aa814ce6c 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -886,6 +886,9 @@ class PipelineSummary(zkobject.ShardedZKObject):
# while zuul-web tries to read it.
try:
super().refresh(context)
+ except NoNodeError:
+ self.log.warning("No pipeline summary found "
+ "(may not be created yet)")
except Exception:
self.log.exception("Failed to refresh data")
return self.status
@@ -2005,6 +2008,7 @@ class FrozenJob(zkobject.ZKObject):
'requires',
'workspace_scheme',
'config_hash',
+ 'deduplicate',
)
job_data_attributes = ('artifact_data',
@@ -2018,9 +2022,33 @@ class FrozenJob(zkobject.ZKObject):
'affected_projects',
)
+ def __init__(self):
+ super().__init__()
+ self._set(_ready_to_run=False)
+
def __repr__(self):
return '<FrozenJob %s>' % (self.name)
+ def isEqual(self, other):
+ # Compare two frozen jobs to determine whether they are
+ # effectively equal. The inheritance path will always be
+ # different, so it is ignored. But if otherwise they have the
+ # same attributes, they will probably produce the same
+ # results.
+ if not isinstance(other, FrozenJob):
+ return False
+ if self.name != other.name:
+ return False
+ for k in self.attributes:
+ if k in ['inheritance_path', 'waiting_status', 'queued']:
+ continue
+ if getattr(self, k) != getattr(other, k):
+ return False
+ for k in self.job_data_attributes:
+ if getattr(self, k) != getattr(other, k):
+ return False
+ return True
+
@classmethod
def new(klass, context, **kw):
obj = klass()
@@ -2098,6 +2126,10 @@ class FrozenJob(zkobject.ZKObject):
def deserialize(self, raw, context):
data = super().deserialize(raw, context)
+ # MODEL_API < 8
+ if 'deduplicate' not in data:
+ data['deduplicate'] = 'auto'
+
if hasattr(self, 'nodeset'):
nodeset = self.nodeset
else:
@@ -2367,6 +2399,7 @@ class Job(ConfigObject):
self.cleanup_run))
d['post_review'] = self.post_review
d['match_on_config_updates'] = self.match_on_config_updates
+ d['deduplicate'] = self.deduplicate
if self.isBase():
d['parent'] = None
elif self.parent:
@@ -2404,6 +2437,7 @@ class Job(ConfigObject):
irrelevant_file_matcher=None, # skip-if
_irrelevant_files=(),
match_on_config_updates=True,
+ deduplicate='auto',
tags=frozenset(),
provides=frozenset(),
requires=frozenset(),
@@ -2674,6 +2708,8 @@ class Job(ConfigObject):
job=self.name,
maxnodes=layout.tenant.max_nodes_per_job))
+ for dependency in self.dependencies:
+ layout.getJob(dependency.name)
for pb in self.pre_run + self.run + self.post_run + self.cleanup_run:
pb.validateReferences(layout)
@@ -3041,6 +3077,16 @@ class JobDependency(ConfigObject):
self.name = name
self.soft = soft
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+ if not isinstance(other, JobDependency):
+ return False
+ return self.toDict() == other.toDict()
+
+ __hash__ = object.__hash__
+
def toDict(self):
return {'name': self.name,
'soft': self.soft}
@@ -3698,6 +3744,7 @@ class BuildSet(zkobject.ZKObject):
fail_fast=False,
job_graph=None,
jobs={},
+ deduplicated_jobs=[],
# Cached job graph of previous layout; not serialized
_old_job_graph=None,
_old_jobs={},
@@ -3877,18 +3924,20 @@ class BuildSet(zkobject.ZKObject):
# If we have a current build before refreshing, we may
# be able to skip refreshing some items since they
# will not have changed.
+ build_path = data["builds"].get(job_name)
old_build = self.builds.get(job_name)
+ old_build_exists = (old_build
+ and old_build.getPath() == build_path)
if job_name in self.jobs:
job = self.jobs[job_name]
- if not old_build:
+ if not old_build_exists:
job.refresh(context)
else:
job_path = FrozenJob.jobPath(job_name, self.getPath())
job = FrozenJob.fromZK(context, job_path, buildset=self)
self.jobs[job_name] = job
- build_path = data["builds"].get(job_name)
if build_path:
build = self.builds.get(job_name)
if build and build.getPath() == build_path:
@@ -4028,7 +4077,22 @@ class BuildSet(zkobject.ZKObject):
def removeJobNodeRequestID(self, job_name):
if job_name in self.node_requests:
- del self.node_requests[job_name]
+ with self.activeContext(
+ self.item.pipeline.manager.current_context):
+ del self.node_requests[job_name]
+
+ def setJobNodeRequestDuplicate(self, job_name, other_item):
+ with self.activeContext(
+ self.item.pipeline.manager.current_context):
+ self.node_requests[job_name] = {
+ 'deduplicated_item': other_item.uuid}
+
+ def setJobNodeSetInfoDuplicate(self, job_name, other_item):
+ # Nothing uses this value yet; we just need an entry in the
+ # nodeset_info dict.
+ with self.activeContext(self.item.pipeline.manager.current_context):
+ self.nodeset_info[job_name] = {
+ 'deduplicated_item': other_item.uuid}
def jobNodeRequestComplete(self, job_name, nodeset):
if job_name in self.nodeset_info:
@@ -4415,7 +4479,7 @@ class QueueItem(zkobject.ZKObject):
continue
build = self.current_build_set.getBuild(job.name)
if (build and build.result and
- build.result not in ['SUCCESS', 'SKIPPED']):
+ build.result not in ['SUCCESS', 'SKIPPED', 'RETRY']):
return True
return False
@@ -4626,7 +4690,8 @@ class QueueItem(zkobject.ZKObject):
data = []
ret = self.item_ahead.providesRequirements(job, data)
data.reverse()
- job.setArtifactData(data)
+ if data:
+ job.setArtifactData(data)
except RequirementsError as e:
self.warning(str(e))
fakebuild = Build.new(self.pipeline.manager.current_context,
@@ -4637,24 +4702,36 @@ class QueueItem(zkobject.ZKObject):
ret = False
return ret
- def findJobsToRun(self, semaphore_handler):
- torun = []
- if not self.live:
- return []
- if not self.current_build_set.job_graph:
- return []
- if self.item_ahead:
- # Only run jobs if any 'hold' jobs on the change ahead
- # have completed successfully.
- if self.item_ahead.isHoldingFollowingChanges():
- return []
+ def findDuplicateJob(self, job):
+ """
+ If another item in the bundle has a duplicate job,
+ return the other item
+ """
+ if not self.bundle:
+ return None
+ if job.deduplicate is False:
+ return None
+ for other_item in self.bundle.items:
+ if other_item is self:
+ continue
+ for other_job in other_item.getJobs():
+ if other_job.isEqual(job):
+ if job.deduplicate == 'auto':
+ # Deduplicate if there are required projects
+ # or the item project is the same.
+ if (not job.required_projects and
+ self.change.project != other_item.change.project):
+ continue
+ return other_item
+ def updateJobParentData(self):
job_graph = self.current_build_set.job_graph
failed_job_names = set() # Jobs that run and failed
ignored_job_names = set() # Jobs that were skipped or canceled
unexecuted_job_names = set() # Jobs that were not started yet
jobs_not_started = set()
for job in job_graph.getJobs():
+ job._set(_ready_to_run=False)
build = self.current_build_set.getBuild(job.name)
if build:
if build.result == 'SUCCESS' or build.paused:
@@ -4667,8 +4744,6 @@ class QueueItem(zkobject.ZKObject):
unexecuted_job_names.add(job.name)
jobs_not_started.add(job)
- # Attempt to run jobs in the order they appear in
- # configuration.
for job in job_graph.getJobs():
if job not in jobs_not_started:
continue
@@ -4719,7 +4794,91 @@ class QueueItem(zkobject.ZKObject):
job.setParentData(new_parent_data,
new_secret_parent_data,
new_artifact_data)
+ job._set(_ready_to_run=True)
+
+ def deduplicateJobs(self, log):
+ """Sync node request and build info with deduplicated jobs"""
+ if not self.live:
+ return
+ if not self.current_build_set.job_graph:
+ return
+ if self.item_ahead:
+ # Only run jobs if any 'hold' jobs on the change ahead
+ # have completed successfully.
+ if self.item_ahead.isHoldingFollowingChanges():
+ return
+
+ self.updateJobParentData()
+
+ if COMPONENT_REGISTRY.model_api < 8:
+ return
+
+ if not self.bundle:
+ return
+
+ build_set = self.current_build_set
+ job_graph = build_set.job_graph
+ for job in job_graph.getJobs():
+ this_request = build_set.getJobNodeRequestID(job.name)
+ this_nodeset = build_set.getJobNodeSetInfo(job.name)
+ this_build = build_set.getBuild(job.name)
+
+ if this_build:
+ # Nothing more possible for this job
+ continue
+
+ other_item = self.findDuplicateJob(job)
+ if not other_item:
+ continue
+ other_build_set = other_item.current_build_set
+
+ # Handle node requests
+ other_request = other_build_set.getJobNodeRequestID(job.name)
+ if (isinstance(other_request, dict) and
+ other_request.get('deduplicated_item') == self.uuid):
+ # We're the original, but we're probably in the middle
+ # of a retry
+ return
+ if other_request is not None and this_request is None:
+ log.info("Deduplicating request of bundle job %s for item %s "
+ "with item %s", job, self, other_item)
+ build_set.setJobNodeRequestDuplicate(job.name, other_item)
+
+ # Handle provisioned nodes
+ other_nodeset = other_build_set.getJobNodeSetInfo(job.name)
+ if (isinstance(other_nodeset, dict) and
+ other_nodeset.get('deduplicated_item') == self.uuid):
+ # We're the original, but we're probably in the middle
+ # of a retry
+ return
+ if other_nodeset is not None and this_nodeset is None:
+ log.info("Deduplicating nodeset of bundle job %s for item %s "
+ "with item %s", job, self, other_item)
+ build_set.setJobNodeSetInfoDuplicate(job.name, other_item)
+
+ # Handle builds
+ other_build = other_build_set.getBuild(job.name)
+ if other_build and not this_build:
+ log.info("Deduplicating build of bundle job %s for item %s "
+ "with item %s", job, self, other_item)
+ self.addBuild(other_build)
+ job._set(_ready_to_run=False)
+
+ def findJobsToRun(self, semaphore_handler):
+ torun = []
+ if not self.live:
+ return []
+ if not self.current_build_set.job_graph:
+ return []
+ if self.item_ahead:
+ # Only run jobs if any 'hold' jobs on the change ahead
+ # have completed successfully.
+ if self.item_ahead.isHoldingFollowingChanges():
+ return []
+ job_graph = self.current_build_set.job_graph
+ for job in job_graph.getJobs():
+ if job._ready_to_run:
nodeset = self.current_build_set.getJobNodeSetInfo(job.name)
if nodeset is None:
# The nodes for this job are not ready, skip
@@ -5240,10 +5399,12 @@ class QueueItem(zkobject.ZKObject):
return False
def getBlobKeys(self):
+ job_graph = self.current_build_set.job_graph
+ if not job_graph:
+ return set()
# Return a set of blob keys used by this item
# for each job in the frozen job graph
keys = set()
- job_graph = self.current_build_set.job_graph
for job in job_graph.getJobs():
for pb in job.all_playbooks:
for secret in pb['secrets'].values():
@@ -5387,6 +5548,9 @@ class Ref(object):
tpc = tenant.project_configs.get(self.project.canonical_name)
if tpc is None:
return False
+ if hasattr(self, 'branch'):
+ if tpc.isAlwaysDynamicBranch(self.branch):
+ return True
if self.files is None:
# If self.files is None we don't know if this change updates the
# config so assume it does as this is a safe default if we don't
@@ -6309,9 +6473,13 @@ class TenantProjectConfig(object):
self.load_classes = set()
self.shadow_projects = set()
self.branches = []
+ self.dynamic_branches = []
# The tenant's default setting of exclude_unprotected_branches will
# be overridden by this one if not None.
self.exclude_unprotected_branches = None
+ self.include_branches = None
+ self.exclude_branches = None
+ self.always_dynamic_branches = None
self.parsed_branch_config = {} # branch -> ParsedConfig
# The list of paths to look for extra zuul config files
self.extra_config_files = ()
@@ -6320,6 +6488,35 @@ class TenantProjectConfig(object):
# Load config from a different branch if this is a config project
self.load_branch = None
+ def isAlwaysDynamicBranch(self, branch):
+ if self.always_dynamic_branches is None:
+ return False
+ for r in self.always_dynamic_branches:
+ if r.fullmatch(branch):
+ return True
+
+ def includesBranch(self, branch):
+ if self.include_branches is not None:
+ included = False
+ for r in self.include_branches:
+ if r.fullmatch(branch):
+ included = True
+ break
+ else:
+ included = True
+ if not included:
+ return False
+
+ excluded = False
+ if self.exclude_branches is not None:
+ for r in self.exclude_branches:
+ if r.fullmatch(branch):
+ excluded = True
+ break
+ if excluded:
+ return False
+ return True
+
class ProjectPipelineConfig(ConfigObject):
# Represents a project cofiguration in the context of a pipeline
@@ -6650,11 +6847,13 @@ class UnparsedAbideConfig(object):
self.ltime = -1
self.tenants = {}
self.admin_rules = []
+ self.semaphores = []
def extend(self, conf):
if isinstance(conf, UnparsedAbideConfig):
self.tenants.update(conf.tenants)
self.admin_rules.extend(conf.admin_rules)
+ self.semaphores.extend(conf.semaphores)
return
if not isinstance(conf, list):
@@ -6673,6 +6872,8 @@ class UnparsedAbideConfig(object):
self.tenants[value["name"]] = value
elif key == 'admin-rule':
self.admin_rules.append(value)
+ elif key == 'global-semaphore':
+ self.semaphores.append(value)
else:
raise ConfigItemUnknownError(item)
@@ -6681,6 +6882,7 @@ class UnparsedAbideConfig(object):
"uuid": self.uuid,
"tenants": self.tenants,
"admin_rules": self.admin_rules,
+ "semaphores": self.semaphores,
}
@classmethod
@@ -6690,6 +6892,7 @@ class UnparsedAbideConfig(object):
unparsed_abide.ltime = ltime
unparsed_abide.tenants = data["tenants"]
unparsed_abide.admin_rules = data["admin_rules"]
+ unparsed_abide.semaphores = data.get("semaphores", [])
return unparsed_abide
@@ -6961,6 +7164,9 @@ class Layout(object):
# It's ok to have a duplicate semaphore definition, but only if
# they are in different branches of the same repo, and have
# the same values.
+ if semaphore.name in self.tenant.global_semaphores:
+ raise Exception("Semaphore %s shadows a global semaphore and "
+ "will be ignored" % (semaphore.name))
other = self.semaphores.get(semaphore.name)
if other is not None:
if not semaphore.source_context.isSameProject(
@@ -6980,6 +7186,19 @@ class Layout(object):
return
self.semaphores[semaphore.name] = semaphore
+ def getSemaphore(self, abide, semaphore_name):
+ if semaphore_name in self.tenant.global_semaphores:
+ return abide.semaphores[semaphore_name]
+ semaphore = self.semaphores.get(semaphore_name)
+ if semaphore:
+ return semaphore
+ # Return an implied semaphore with max=1
+ # TODO: consider deprecating implied semaphores to avoid typo
+ # config errors
+ semaphore = Semaphore(semaphore_name)
+ semaphore.freeze()
+ return semaphore
+
def addQueue(self, queue):
# Change queues must be unique and cannot be overridden.
if queue.name in self.queues:
@@ -7364,9 +7583,10 @@ class Layout(object):
class Semaphore(ConfigObject):
- def __init__(self, name, max=1):
+ def __init__(self, name, max=1, global_scope=False):
super(Semaphore, self).__init__()
self.name = name
+ self.global_scope = global_scope
self.max = int(max)
def __ne__(self, other):
@@ -7442,6 +7662,7 @@ class Tenant(object):
self.authorization_rules = []
self.default_auth_realm = None
+ self.global_semaphores = set()
def __repr__(self):
return f"<Tenant {self.name}>"
@@ -7550,16 +7771,21 @@ class Tenant(object):
(project,))
return result
- def getProjectBranches(self, project_canonical_name):
+ def getProjectBranches(self, project_canonical_name,
+ include_always_dynamic=False):
"""Return a project's branches (filtered by this tenant config)
:arg str project_canonical: The project's canonical name.
+ :arg bool include_always_dynamic: Whether to include
+ always-dynamic-branches
:returns: A list of branch names.
:rtype: [str]
"""
tpc = self.project_configs[project_canonical_name]
+ if include_always_dynamic:
+ return tpc.branches + tpc.dynamic_branches
return tpc.branches
def getExcludeUnprotectedBranches(self, project):
@@ -7645,6 +7871,7 @@ class UnparsedBranchCache(object):
class Abide(object):
def __init__(self):
self.admin_rules = {}
+ self.semaphores = {}
self.tenants = {}
# tenant -> project -> list(tpcs)
# The project TPCs are stored as a list as we don't check for
diff --git a/zuul/model_api.py b/zuul/model_api.py
index 05286dad5..0534ee9c4 100644
--- a/zuul/model_api.py
+++ b/zuul/model_api.py
@@ -14,4 +14,4 @@
# When making ZK schema changes, increment this and add a record to
# docs/developer/model-changelog.rst
-MODEL_API = 7
+MODEL_API = 8
diff --git a/zuul/reporter/__init__.py b/zuul/reporter/__init__.py
index 93b218519..9b8f2c11c 100644
--- a/zuul/reporter/__init__.py
+++ b/zuul/reporter/__init__.py
@@ -35,8 +35,19 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
self._action = action
@abc.abstractmethod
- def report(self, item):
- """Send the compiled report message."""
+ def report(self, item, phase1=True, phase2=True):
+ """Send the compiled report message
+
+ Two-phase reporting may be enabled if one or the other of the
+ `phase1` or `phase2` arguments is False.
+
+ Phase1 should report everything except the actual merge action.
+ Phase2 should report only the merge action.
+
+ :arg phase1 bool: Whether to enable phase1 reporting
+ :arg phase2 bool: Whether to enable phase2 reporting
+
+ """
def getSubmitAllowNeeds(self):
"""Get a list of code review labels that are allowed to be
@@ -172,6 +183,9 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
def _formatItemReportSuccess(self, item, with_jobs=True):
msg = item.pipeline.success_message
if with_jobs:
+ status_url = item.formatStatusUrl()
+ if status_url is not None:
+ msg += '\n' + status_url
msg += '\n\n' + self._formatItemReportJobs(item)
return msg
@@ -182,7 +196,7 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
msg = 'This change depends on a change that failed to merge.\n'
elif item.dequeued_missing_requirements:
msg = ('This change is unable to merge '
- 'due to a missing requirement.\n')
+ 'due to a missing merge requirement.\n')
elif item.isBundleFailing():
msg = 'This change is part of a bundle that failed.\n'
if with_jobs:
@@ -196,6 +210,9 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
else:
msg = item.pipeline.failure_message
if with_jobs:
+ status_url = item.formatStatusUrl()
+ if status_url is not None:
+ msg += '\n' + status_url
msg += '\n\n' + self._formatItemReportJobs(item)
return msg
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 0654c3d5d..bfabcecc2 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import itertools
import logging
import socket
import sys
@@ -183,9 +184,11 @@ class Scheduler(threading.Thread):
_merger_client_class = MergeClient
_executor_client_class = ExecutorClient
- def __init__(self, config, connections, app, testonly=False):
+ def __init__(self, config, connections, app, wait_for_init,
+ testonly=False):
threading.Thread.__init__(self)
self.daemon = True
+ self.wait_for_init = wait_for_init
self.hostname = socket.getfqdn()
self.primed_event = threading.Event()
# Wake up the main run loop
@@ -1295,6 +1298,7 @@ class Scheduler(threading.Thread):
try:
abide = Abide()
loader.loadAdminRules(abide, unparsed_abide)
+ loader.loadSemaphores(abide, unparsed_abide)
loader.loadTPCs(abide, unparsed_abide)
for tenant_name in tenants_to_load:
loader.loadTenant(abide, tenant_name, self.ansible_manager,
@@ -1352,8 +1356,9 @@ class Scheduler(threading.Thread):
for tenant_name in deleted_tenants:
self.abide.clearTPCs(tenant_name)
- loader.loadTPCs(self.abide, self.unparsed_abide)
loader.loadAdminRules(self.abide, self.unparsed_abide)
+ loader.loadSemaphores(self.abide, self.unparsed_abide)
+ loader.loadTPCs(self.abide, self.unparsed_abide)
if event.smart:
# Consider caches always valid
@@ -1365,10 +1370,27 @@ class Scheduler(threading.Thread):
# Consider caches valid if the cache ltime >= event ltime
min_ltimes = defaultdict(
lambda: defaultdict(lambda: event.zuul_event_ltime))
- # Invalidate the branch cache for all connections
+ # Invalidate the branch cache
for connection in self.connections.connections.values():
if hasattr(connection, 'clearBranchCache'):
- connection.clearBranchCache()
+ if event.tenants:
+ # Only clear the projects used by this
+ # tenant (zuul-web won't be able to load
+ # any tenants that we don't immediately
+ # reconfigure after clearing)
+ for tenant_name in event.tenants:
+ projects = [
+ tpc.project.name for tpc in
+ itertools.chain(
+ self.abide.getConfigTPCs(tenant_name),
+ self.abide.getUntrustedTPCs(
+ tenant_name))
+ ]
+ connection.clearBranchCache(projects)
+ else:
+ # Clear all projects since we're reloading
+ # all tenants.
+ connection.clearBranchCache()
ltime = self.zk_client.getCurrentLtime()
# Consider the branch cache valid only after we
# cleared it
@@ -1883,6 +1905,9 @@ class Scheduler(threading.Thread):
self.log.debug("Statsd enabled")
else:
self.log.debug("Statsd not configured")
+ if self.wait_for_init:
+ self.log.debug("Waiting for tenant initialization")
+ self.primed_event.wait()
while True:
self.log.debug("Run handler sleeping")
self.wake_event.wait()
@@ -1981,8 +2006,9 @@ class Scheduler(threading.Thread):
tenant_config, from_script=script)
self.system_config_cache.set(self.unparsed_abide, self.globals)
- loader.loadTPCs(self.abide, self.unparsed_abide)
loader.loadAdminRules(self.abide, self.unparsed_abide)
+ loader.loadSemaphores(self.abide, self.unparsed_abide)
+ loader.loadTPCs(self.abide, self.unparsed_abide)
def updateSystemConfig(self):
with self.layout_lock:
@@ -2001,8 +2027,9 @@ class Scheduler(threading.Thread):
for tenant_name in deleted_tenants:
self.abide.clearTPCs(tenant_name)
- loader.loadTPCs(self.abide, self.unparsed_abide)
loader.loadAdminRules(self.abide, self.unparsed_abide)
+ loader.loadSemaphores(self.abide, self.unparsed_abide)
+ loader.loadTPCs(self.abide, self.unparsed_abide)
def process_pipelines(self, tenant, tenant_lock):
for pipeline in tenant.layout.pipelines.values():
@@ -2220,6 +2247,12 @@ class Scheduler(threading.Thread):
if tpc and not tpc.load_classes:
reconfigure_tenant = False
+ # If we are listing included branches and this branch
+ # is not included, skip reconfig.
+ if (reconfigure_tenant and
+ not tpc.includesBranch(event.branch)):
+ reconfigure_tenant = False
+
# But if the event is that branch protection status has
# changed, do reconfigure.
if (event.isBranchProtectionChanged()):
diff --git a/zuul/source/__init__.py b/zuul/source/__init__.py
index b1b15a6ef..5ccfa07b6 100644
--- a/zuul/source/__init__.py
+++ b/zuul/source/__init__.py
@@ -142,13 +142,21 @@ class BaseSource(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def getProjectOpenChanges(self, project):
-
"""Get the open changes for a project."""
@abc.abstractmethod
def getGitUrl(self, project):
"""Get the git url for a project."""
+ def getRetryTimeout(self, project):
+ """Get the retry timeout for a project in seconds.
+
+ This is used by the mergers to potentially increase the number
+ of git fetch retries before giving up. Return None to use the
+ default.
+ """
+ return None
+
@abc.abstractmethod
def getProject(self, name):
"""Get a project."""
diff --git a/zuul/web/__init__.py b/zuul/web/__init__.py
index b031ee020..f06dd0b52 100755
--- a/zuul/web/__init__.py
+++ b/zuul/web/__init__.py
@@ -36,7 +36,7 @@ import prometheus_client
import zuul.executor.common
from zuul import exceptions
from zuul.configloader import ConfigLoader
-from zuul.connection import BaseConnection
+from zuul.connection import BaseConnection, ReadOnlyBranchCacheError
import zuul.lib.repl
from zuul.lib import commandsocket, encryption, streamer_utils
from zuul.lib.ansible import AnsibleManager
@@ -360,11 +360,15 @@ class ZuulWebAPI(object):
self.system = ZuulSystem(self.zk_client)
self.zk_nodepool = ZooKeeperNodepool(self.zk_client,
enable_node_cache=True)
- self.cache = {}
- self.cache_time = {}
+ self.status_caches = {}
+ self.status_cache_times = {}
+ self.status_cache_locks = defaultdict(threading.Lock)
+ self.tenants_cache = []
+ self.tenants_cache_time = 0
+ self.tenants_cache_lock = threading.Lock()
+
self.cache_expiry = 1
self.static_cache_expiry = zuulweb.static_cache_expiry
- self.status_lock = defaultdict(threading.Lock)
@property
def log(self):
@@ -569,6 +573,7 @@ class ZuulWebAPI(object):
@cherrypy.tools.json_out(content_type='application/json; charset=utf-8')
def autohold_list(self, tenant_name, *args, **kwargs):
# we don't use json_in because a payload is not mandatory with GET
+ _ = self._getTenantOrRaise(tenant_name)
if cherrypy.request.method != 'GET':
raise cherrypy.HTTPError(405)
# filter by project if passed as a query string
@@ -582,6 +587,7 @@ class ZuulWebAPI(object):
# we don't use json_in because a payload is not mandatory with GET
# Note: GET handling is redundant with autohold_list
# and could be removed.
+ tenant = self._getTenantOrRaise(tenant_name)
if cherrypy.request.method == 'GET':
return self._autohold_list(tenant_name, project_name)
elif cherrypy.request.method == 'POST':
@@ -593,6 +599,8 @@ class ZuulWebAPI(object):
if token_error is not None:
return token_error
self.isAuthorizedOrRaise(claims, tenant_name)
+ project = self._getProjectOrRaise(tenant, project_name)
+
msg = 'User "%s" requesting "%s" on %s/%s'
self.log.info(
msg % (claims['__zuul_uid_claim'], 'autohold',
@@ -619,9 +627,6 @@ class ZuulWebAPI(object):
if count < 0:
raise cherrypy.HTTPError(400, "Count must be greater 0")
- tenant = self._getTenantOrRaise(tenant_name)
- project = self._getProjectOrRaise(tenant, project_name)
-
project_name = project.canonical_name
if jbody['change']:
@@ -940,9 +945,7 @@ class ZuulWebAPI(object):
return [n for n, t in self.zuulweb.abide.tenants.items()
if self._is_authorized(t, claims)]
- @cherrypy.expose
- @cherrypy.tools.json_out(content_type='application/json; charset=utf-8')
- def tenants(self):
+ def _tenants(self):
result = []
for tenant_name, tenant in sorted(self.zuulweb.abide.tenants.items()):
queue_size = 0
@@ -959,10 +962,26 @@ class ZuulWebAPI(object):
'projects': len(tenant.untrusted_projects),
'queue': queue_size,
})
+ return result
+
+ @cherrypy.expose
+ @cherrypy.tools.json_out(content_type='application/json; charset=utf-8')
+ def tenants(self):
+ cache_time = self.tenants_cache_time
+ if time.time() - cache_time > self.cache_expiry:
+ with self.tenants_cache_lock:
+ self.tenants_cache = self._tenants()
+ self.tenants_cache_time = time.time()
resp = cherrypy.response
+ resp.headers["Cache-Control"] = f"public, max-age={self.cache_expiry}"
+ last_modified = datetime.utcfromtimestamp(
+ self.tenants_cache_time
+ )
+ last_modified_header = last_modified.strftime('%a, %d %b %Y %X GMT')
+ resp.headers["Last-modified"] = last_modified_header
resp.headers['Access-Control-Allow-Origin'] = '*'
- return result
+ return self.tenants_cache
@cherrypy.expose
@cherrypy.tools.json_out(content_type='application/json; charset=utf-8')
@@ -991,25 +1010,32 @@ class ZuulWebAPI(object):
def _getStatus(self, tenant_name):
tenant = self._getTenantOrRaise(tenant_name)
- if tenant_name not in self.cache or \
- (time.time() - self.cache_time[tenant_name]) > self.cache_expiry:
- if self.status_lock[tenant_name].acquire(blocking=False):
+ cache_time = self.status_cache_times.get(tenant_name, 0)
+ if tenant_name not in self.status_caches or \
+ (time.time() - cache_time) > self.cache_expiry:
+ if self.status_cache_locks[tenant_name].acquire(
+ blocking=False
+ ):
try:
- self.cache[tenant_name] = self.formatStatus(tenant)
- self.cache_time[tenant_name] = time.time()
+ self.status_caches[tenant_name] =\
+ self.formatStatus(tenant)
+ self.status_cache_times[tenant_name] =\
+ time.time()
finally:
- self.status_lock[tenant_name].release()
- if not self.cache.get(tenant_name):
+ self.status_cache_locks[tenant_name].release()
+ if not self.status_caches.get(tenant_name):
# If the cache is empty at this point it means that we didn't
# get the lock but another thread is initializing the cache
# for the first time. In this case we just wait for the lock
# to wait for it to finish.
- with self.status_lock[tenant_name]:
+ with self.status_cache_locks[tenant_name]:
pass
- payload = self.cache[tenant_name]
+ payload = self.status_caches[tenant_name]
resp = cherrypy.response
resp.headers["Cache-Control"] = f"public, max-age={self.cache_expiry}"
- last_modified = datetime.utcfromtimestamp(self.cache_time[tenant_name])
+ last_modified = datetime.utcfromtimestamp(
+ self.status_cache_times[tenant_name]
+ )
last_modified_header = last_modified.strftime('%a, %d %b %Y %X GMT')
resp.headers["Last-modified"] = last_modified_header
resp.headers['Access-Control-Allow-Origin'] = '*'
@@ -1819,6 +1845,7 @@ class ZuulWeb(object):
route_map = cherrypy.dispatch.RoutesDispatcher()
api = ZuulWebAPI(self)
+ self.api = api
route_map.connect('api', '/api',
controller=api, action='index')
route_map.connect('api', '/api/info',
@@ -2048,7 +2075,11 @@ class ZuulWeb(object):
if not self._system_config_running:
return
self.updateSystemConfig()
- self.updateLayout()
+ if not self.updateLayout():
+ # Branch cache errors with at least one tenant,
+ # try again.
+ time.sleep(10)
+ self.system_config_cache_wake_event.set()
except Exception:
self.log.exception("Exception while updating system config")
@@ -2070,8 +2101,9 @@ class ZuulWeb(object):
for tenant_name in deleted_tenants:
self.abide.clearTPCs(tenant_name)
- loader.loadTPCs(self.abide, self.unparsed_abide)
loader.loadAdminRules(self.abide, self.unparsed_abide)
+ loader.loadSemaphores(self.abide, self.unparsed_abide)
+ loader.loadTPCs(self.abide, self.unparsed_abide)
def updateLayout(self):
self.log.debug("Updating layout state")
@@ -2084,38 +2116,51 @@ class ZuulWeb(object):
tenant_names = set(self.abide.tenants)
tenant_names.update(self.unparsed_abide.tenants.keys())
+ success = True
for tenant_name in tenant_names:
# Reload the tenant if the layout changed.
- if (self.local_layout_state.get(tenant_name)
- == self.tenant_layout_state.get(tenant_name)):
- continue
- self.log.debug("Reloading tenant %s", tenant_name)
- with tenant_read_lock(self.zk_client, tenant_name):
- layout_state = self.tenant_layout_state.get(tenant_name)
- layout_uuid = layout_state and layout_state.uuid
-
- if layout_state:
- min_ltimes = self.tenant_layout_state.getMinLtimes(
- layout_state)
- branch_cache_min_ltimes = (
- layout_state.branch_cache_min_ltimes)
- else:
- # Consider all project branch caches valid if
- # we don't have a layout state.
- min_ltimes = defaultdict(
- lambda: defaultdict(lambda: -1))
- branch_cache_min_ltimes = defaultdict(lambda: -1)
-
- # The tenant will be stored in self.abide.tenants after
- # it was loaded.
- tenant = loader.loadTenant(
- self.abide, tenant_name, self.ansible_manager,
- self.unparsed_abide, min_ltimes=min_ltimes,
- layout_uuid=layout_uuid,
- branch_cache_min_ltimes=branch_cache_min_ltimes)
- if tenant is not None:
- self.local_layout_state[tenant_name] = layout_state
- else:
- with suppress(KeyError):
- del self.local_layout_state[tenant_name]
+ try:
+ self._updateTenantLayout(loader, tenant_name)
+ except ReadOnlyBranchCacheError:
+ self.log.info(
+ "Unable to update layout due to incomplete branch "
+ "cache, possibly due to in-progress tenant "
+ "reconfiguration; will retry")
+ success = False
self.log.debug("Done updating layout state")
+ return success
+
+ def _updateTenantLayout(self, loader, tenant_name):
+ # Reload the tenant if the layout changed.
+ if (self.local_layout_state.get(tenant_name)
+ == self.tenant_layout_state.get(tenant_name)):
+ return
+ self.log.debug("Reloading tenant %s", tenant_name)
+ with tenant_read_lock(self.zk_client, tenant_name):
+ layout_state = self.tenant_layout_state.get(tenant_name)
+ layout_uuid = layout_state and layout_state.uuid
+
+ if layout_state:
+ min_ltimes = self.tenant_layout_state.getMinLtimes(
+ layout_state)
+ branch_cache_min_ltimes = (
+ layout_state.branch_cache_min_ltimes)
+ else:
+ # Consider all project branch caches valid if
+ # we don't have a layout state.
+ min_ltimes = defaultdict(
+ lambda: defaultdict(lambda: -1))
+ branch_cache_min_ltimes = defaultdict(lambda: -1)
+
+ # The tenant will be stored in self.abide.tenants after
+ # it was loaded.
+ tenant = loader.loadTenant(
+ self.abide, tenant_name, self.ansible_manager,
+ self.unparsed_abide, min_ltimes=min_ltimes,
+ layout_uuid=layout_uuid,
+ branch_cache_min_ltimes=branch_cache_min_ltimes)
+ if tenant is not None:
+ self.local_layout_state[tenant_name] = layout_state
+ else:
+ with suppress(KeyError):
+ del self.local_layout_state[tenant_name]
diff --git a/zuul/zk/__init__.py b/zuul/zk/__init__.py
index 195197c5f..8ddc7511d 100644
--- a/zuul/zk/__init__.py
+++ b/zuul/zk/__init__.py
@@ -17,7 +17,7 @@ from threading import Thread
from typing import List, Callable
from kazoo.client import KazooClient
-from kazoo.exceptions import NoNodeError
+from kazoo.exceptions import NoNodeError, NodeExistsError
from kazoo.handlers.threading import KazooTimeoutError
from kazoo.protocol.states import KazooState
@@ -211,8 +211,11 @@ class ZooKeeperClient(object):
try:
zstat = self.client.set("/zuul/ltime", b"")
except NoNodeError:
- self.client.create("/zuul/ltime", b"", makepath=True)
- zstat = self.client.set("/zuul/ltime", b"")
+ try:
+ self.client.create("/zuul/ltime", b"", makepath=True)
+ zstat = self.client.set("/zuul/ltime", b"")
+ except NodeExistsError:
+ zstat = self.client.set("/zuul/ltime", b"")
return zstat.last_modified_transaction_id
diff --git a/zuul/zk/branch_cache.py b/zuul/zk/branch_cache.py
index 5600802e9..08f0d6046 100644
--- a/zuul/zk/branch_cache.py
+++ b/zuul/zk/branch_cache.py
@@ -104,12 +104,17 @@ class BranchCache:
self.cache = BranchCacheZKObject.new(
self.zk_context, _path=data_path)
- def clear(self):
+ def clear(self, projects=None):
"""Clear the cache"""
with locked(self.wlock):
with self.cache.activeContext(self.zk_context):
- self.cache.protected.clear()
- self.cache.remainder.clear()
+ if projects is None:
+ self.cache.protected.clear()
+ self.cache.remainder.clear()
+ else:
+ for p in projects:
+ self.cache.protected.pop(p, None)
+ self.cache.remainder.pop(p, None)
def getProjectBranches(self, project_name, exclude_unprotected,
min_ltime=-1):
diff --git a/zuul/zk/event_queues.py b/zuul/zk/event_queues.py
index 0e5cba987..8718f609c 100644
--- a/zuul/zk/event_queues.py
+++ b/zuul/zk/event_queues.py
@@ -28,6 +28,7 @@ from kazoo.protocol.states import EventType
from zuul import model
from zuul.lib.collections import DefaultKeyDict
+from zuul.lib.logutil import get_annotated_logger
from zuul.zk import ZooKeeperSimpleBase, sharding
from zuul.zk.election import SessionAwareElection
@@ -422,7 +423,7 @@ class ZooKeeperEventQueue(ZooKeeperSimpleBase, Iterable):
self.log.error("Removing side channel data: %s", path)
self.kazoo_client.delete(path, recursive=True)
except Exception:
- self.log.execption(
+ self.log.exception(
"Unable to delete side channel data %s", path)
except Exception:
self.log.exception("Error cleaning up event queue %s", self)
@@ -853,7 +854,7 @@ class PipelineTriggerEventQueue(TriggerEventQueue):
class ConnectionEventQueue(ZooKeeperEventQueue):
"""Connection events via ZooKeeper"""
- log = logging.getLogger("zuul.zk.event_queues.ConnectionEventQueue")
+ log = logging.getLogger("zuul.ConnectionEventQueue")
def __init__(self, client, connection_name):
queue_root = "/".join((CONNECTION_ROOT, connection_name, "events"))
@@ -875,8 +876,11 @@ class ConnectionEventQueue(ZooKeeperEventQueue):
)
def put(self, data):
- self.log.debug("Submitting connection event to queue %s: %s",
- self.event_root, data)
+ log = self.log
+ if "zuul_event_id" in data:
+ log = get_annotated_logger(log, data["zuul_event_id"])
+ log.debug("Submitting connection event to queue %s: %s",
+ self.event_root, data)
self._put({'event_data': data})
def __iter__(self):
diff --git a/zuul/zk/job_request_queue.py b/zuul/zk/job_request_queue.py
index 902afce2a..175c57b90 100644
--- a/zuul/zk/job_request_queue.py
+++ b/zuul/zk/job_request_queue.py
@@ -572,7 +572,7 @@ class JobRequestQueue(ZooKeeperSimpleBase):
self.log.error("Removing request params: %s", path)
self.kazoo_client.delete(path, recursive=True)
except Exception:
- self.log.execption(
+ self.log.exception(
"Unable to delete request params %s", path)
except Exception:
self.log.exception(
@@ -585,7 +585,7 @@ class JobRequestQueue(ZooKeeperSimpleBase):
self.log.error("Removing request result: %s", path)
self.kazoo_client.delete(path, recursive=True)
except Exception:
- self.log.execption(
+ self.log.exception(
"Unable to delete request params %s", result_id)
for result_id in lost_data:
try:
@@ -594,7 +594,7 @@ class JobRequestQueue(ZooKeeperSimpleBase):
"Removing request result data: %s", path)
self.kazoo_client.delete(path, recursive=True)
except Exception:
- self.log.execption(
+ self.log.exception(
"Unable to delete request params %s", result_id)
except Exception:
self.log.exception(
@@ -608,7 +608,7 @@ class JobRequestQueue(ZooKeeperSimpleBase):
self.log.error("Removing stale lock: %s", lock_path)
self.kazoo_client.delete(lock_path, recursive=True)
except Exception:
- self.log.execption(
+ self.log.exception(
"Unable to delete lock %s", path)
except Exception:
self.log.exception("Error cleaning up locks %s", self)
diff --git a/zuul/zk/semaphore.py b/zuul/zk/semaphore.py
index bffc3ea8a..721a0438a 100644
--- a/zuul/zk/semaphore.py
+++ b/zuul/zk/semaphore.py
@@ -39,14 +39,23 @@ class SemaphoreHandler(ZooKeeperSimpleBase):
log = logging.getLogger("zuul.zk.SemaphoreHandler")
semaphore_root = "/zuul/semaphores"
+ global_semaphore_root = "/zuul/global-semaphores"
- def __init__(self, client, statsd, tenant_name, layout):
+ def __init__(self, client, statsd, tenant_name, layout, abide):
super().__init__(client)
+ self.abide = abide
self.layout = layout
self.statsd = statsd
self.tenant_name = tenant_name
self.tenant_root = f"{self.semaphore_root}/{tenant_name}"
+ def _makePath(self, semaphore):
+ semaphore_key = quote_plus(semaphore.name)
+ if semaphore.global_scope:
+ return f"{self.global_semaphore_root}/{semaphore_key}"
+ else:
+ return f"{self.tenant_root}/{semaphore_key}"
+
def _emitStats(self, semaphore_path, num_holders):
if self.statsd is None:
return
@@ -80,8 +89,8 @@ class SemaphoreHandler(ZooKeeperSimpleBase):
return False
return True
- def _acquire_one(self, log, item, job, request_resources, semaphore):
- if semaphore.resources_first and request_resources:
+ def _acquire_one(self, log, item, job, request_resources, job_semaphore):
+ if job_semaphore.resources_first and request_resources:
# We're currently in the resource request phase and want to get the
# resources before locking. So we don't need to do anything here.
return True
@@ -92,8 +101,8 @@ class SemaphoreHandler(ZooKeeperSimpleBase):
# the resources phase.
pass
- semaphore_key = quote_plus(semaphore.name)
- semaphore_path = f"{self.tenant_root}/{semaphore_key}"
+ semaphore = self.layout.getSemaphore(self.abide, job_semaphore.name)
+ semaphore_path = self._makePath(semaphore)
semaphore_handle = {
"buildset_path": item.current_build_set.getPath(),
"job_name": job.name,
@@ -139,10 +148,13 @@ class SemaphoreHandler(ZooKeeperSimpleBase):
return holdersFromData(data), zstat
def getSemaphores(self):
- try:
- return self.kazoo_client.get_children(self.tenant_root)
- except NoNodeError:
- return []
+ ret = []
+ for root in (self.global_semaphore_root, self.tenant_root):
+ try:
+ ret.extend(self.kazoo_client.get_children(root))
+ except NoNodeError:
+ pass
+ return ret
def _release(self, log, semaphore_path, semaphore_handle, quiet,
legacy_handle=None):
@@ -183,23 +195,31 @@ class SemaphoreHandler(ZooKeeperSimpleBase):
log = get_annotated_logger(self.log, item.event)
- for semaphore in job.semaphores:
- self._release_one(log, item, job, semaphore, quiet)
+ for job_semaphore in job.semaphores:
+ self._release_one(log, item, job, job_semaphore, quiet)
# If a scheduler has been provided (which it is except in the
# case of a rollback from acquire in this class), broadcast an
# event to trigger pipeline runs.
if sched is None:
return
- for pipeline_name in self.layout.pipelines.keys():
- event = PipelineSemaphoreReleaseEvent()
- sched.pipeline_management_events[
- self.tenant_name][pipeline_name].put(
- event, needs_result=False)
- def _release_one(self, log, item, job, semaphore, quiet):
- semaphore_key = quote_plus(semaphore.name)
- semaphore_path = f"{self.tenant_root}/{semaphore_key}"
+ semaphore = self.layout.getSemaphore(self.abide, job_semaphore.name)
+ if semaphore.global_scope:
+ tenants = [t for t in self.abide.tenants.values()
+ if job_semaphore.name in t.global_semaphores]
+ else:
+ tenants = [self.abide.tenants[self.tenant_name]]
+ for tenant in tenants:
+ for pipeline_name in tenant.layout.pipelines.keys():
+ event = PipelineSemaphoreReleaseEvent()
+ sched.pipeline_management_events[
+ tenant.name][pipeline_name].put(
+ event, needs_result=False)
+
+ def _release_one(self, log, item, job, job_semaphore, quiet):
+ semaphore = self.layout.getSemaphore(self.abide, job_semaphore.name)
+ semaphore_path = self._makePath(semaphore)
semaphore_handle = {
"buildset_path": item.current_build_set.getPath(),
"job_name": job.name,
@@ -209,16 +229,16 @@ class SemaphoreHandler(ZooKeeperSimpleBase):
legacy_handle)
def semaphoreHolders(self, semaphore_name):
- semaphore_key = quote_plus(semaphore_name)
- semaphore_path = f"{self.tenant_root}/{semaphore_key}"
+ semaphore = self.layout.getSemaphore(self.abide, semaphore_name)
+ semaphore_path = self._makePath(semaphore)
try:
holders, _ = self.getHolders(semaphore_path)
except NoNodeError:
holders = []
return holders
- def _max_count(self, semaphore_name: str) -> int:
- semaphore = self.layout.semaphores.get(semaphore_name)
+ def _max_count(self, semaphore_name):
+ semaphore = self.layout.getSemaphore(self.abide, semaphore_name)
return 1 if semaphore is None else semaphore.max
def cleanupLeaks(self):
@@ -240,8 +260,9 @@ class SemaphoreHandler(ZooKeeperSimpleBase):
is not None):
continue
- semaphore_key = quote_plus(semaphore_name)
- semaphore_path = f"{self.tenant_root}/{semaphore_key}"
+ semaphore = self.layout.getSemaphore(
+ self.abide, semaphore_name)
+ semaphore_path = self._makePath(semaphore)
self.log.error("Releasing leaked semaphore %s held by %s",
semaphore_path, holder)
self._release(self.log, semaphore_path, holder, quiet=False)