summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.zuul.yaml22
-rw-r--r--Dockerfile13
-rw-r--r--bindep.txt14
-rw-r--r--doc/source/discussion/components.rst13
-rw-r--r--doc/source/discussion/github-checks-api.rst170
-rw-r--r--doc/source/discussion/index.rst1
-rw-r--r--doc/source/examples/docker-compose.yaml10
-rw-r--r--doc/source/examples/etc_nodepool/nodepool.yaml5
-rw-r--r--doc/source/examples/etc_zuul/zuul.conf5
-rw-r--r--doc/source/examples/pipelines/gerrit-reference-pipelines.yaml18
-rw-r--r--doc/source/examples/playbooks/setup.yaml4
-rw-r--r--doc/source/examples/zoo.cfg15
-rw-r--r--doc/source/howtos/admin.rst1
-rw-r--r--doc/source/howtos/zookeeper.rst118
-rw-r--r--doc/source/reference/developer/specs/kubernetes-operator.rst42
-rw-r--r--doc/source/reference/drivers/github.rst15
-rw-r--r--doc/source/reference/drivers/pagure.rst48
-rw-r--r--doc/source/reference/glossary.rst65
-rw-r--r--doc/source/reference/governance.rst3
-rw-r--r--doc/source/reference/job_def.rst2
-rw-r--r--doc/source/reference/jobs.rst5
-rw-r--r--doc/source/reference/pipeline_def.rst40
-rw-r--r--doc/source/tutorials/quick-start.rst9
-rw-r--r--playbooks/quick-start/run.yaml2
-rw-r--r--playbooks/release/pre.yaml4
-rw-r--r--releasenotes/notes/ansible-27-deprecated-cd82a8a47a10a8c7.yaml5
-rw-r--r--releasenotes/notes/file-comment-levels-9b26156addc3644f.yaml10
-rw-r--r--releasenotes/notes/github-require-check-294d3f27da790fae.yaml6
-rw-r--r--releasenotes/notes/serial-manager-8f2dcf924c72effe.yaml6
-rw-r--r--releasenotes/notes/strip-encrypt-secret-87b06dc887312117.yaml8
-rw-r--r--releasenotes/notes/zookeeper-tls-2748002ff9afbf77.yaml10
-rw-r--r--releasenotes/notes/zuul-ansible-environment-secrets-983e8ced908b294d.yaml5
-rw-r--r--requirements.txt4
-rw-r--r--setup.cfg2
-rw-r--r--tests/base.py424
-rw-r--r--tests/fakegithub.py11
-rw-r--r--tests/fixtures/config/broken-multi-tenant/git/common-config/playbooks/job.yaml2
-rw-r--r--tests/fixtures/config/broken-multi-tenant/git/common-config/zuul.yaml23
-rw-r--r--tests/fixtures/config/broken-multi-tenant/git/org_project1/README1
-rw-r--r--tests/fixtures/config/broken-multi-tenant/git/org_project2/README1
-rw-r--r--tests/fixtures/config/broken-multi-tenant/git/org_project2/zuul.yaml3
-rw-r--r--tests/fixtures/config/broken-multi-tenant/git/org_project3/README1
-rw-r--r--tests/fixtures/config/broken-multi-tenant/git/org_project3/zuul.yaml2
-rw-r--r--tests/fixtures/config/broken-multi-tenant/main.yaml19
-rw-r--r--tests/fixtures/config/gerrit-file-comments/git/org_project/playbooks/file-comments.yaml3
-rw-r--r--tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments-error.yaml3
-rw-r--r--tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments.yaml5
-rw-r--r--tests/fixtures/config/zuul-environment-filter/git/common-config/playbooks/zuul-environment-filter.yaml10
-rw-r--r--tests/fixtures/config/zuul-environment-filter/git/common-config/zuul.yaml21
-rw-r--r--tests/fixtures/config/zuul-environment-filter/main.yaml6
-rw-r--r--tests/fixtures/config/zuultrigger/project-change-merged/git/common-config/zuul.yaml4
-rw-r--r--tests/fixtures/layouts/requirements-github.yaml29
-rw-r--r--tests/fixtures/layouts/requirements-pagure.yaml20
-rw-r--r--tests/fixtures/layouts/serial.yaml48
-rw-r--r--tests/fixtures/zuul-admin-web-oidc.conf45
-rw-r--r--tests/fixtures/zuul-pagure-driver-whitelist.conf19
-rw-r--r--tests/unit/test_configloader.py99
-rw-r--r--tests/unit/test_connection.py6
-rw-r--r--tests/unit/test_cross_crd.py25
-rw-r--r--tests/unit/test_executor.py47
-rw-r--r--tests/unit/test_gerrit.py23
-rw-r--r--tests/unit/test_gerrit_crd.py14
-rw-r--r--tests/unit/test_gerrit_legacy_crd.py12
-rw-r--r--tests/unit/test_git_driver.py14
-rw-r--r--tests/unit/test_github_crd.py2
-rw-r--r--tests/unit/test_github_driver.py94
-rw-r--r--tests/unit/test_github_requirements.py45
-rw-r--r--tests/unit/test_inventory.py8
-rw-r--r--tests/unit/test_lib_ansible.py33
-rw-r--r--tests/unit/test_merger_repo.py49
-rw-r--r--tests/unit/test_pagure_driver.py113
-rw-r--r--tests/unit/test_scheduler.py348
-rw-r--r--tests/unit/test_serial.py123
-rw-r--r--tests/unit/test_v3.py118
-rw-r--r--tests/unit/test_web.py90
-rw-r--r--tests/unit/test_zuultrigger.py26
-rw-r--r--tools/2472D6D0.asc21
-rwxr-xr-xtools/encrypt_secret.py9
-rw-r--r--tools/openssl.cnf352
-rwxr-xr-xtools/pip.sh13
-rwxr-xr-xtools/zk-ca.sh103
-rw-r--r--tox.ini25
-rw-r--r--web/src/containers/build/BuildOutput.jsx18
-rw-r--r--web/src/containers/job/JobVariant.jsx4
-rw-r--r--web/src/containers/status/ChangePanel.jsx44
-rw-r--r--web/src/pages/ConfigErrors.jsx2
-rw-r--r--web/src/pages/Status.jsx8
-rw-r--r--zuul/ansible/base/callback/zuul_stream.py5
-rw-r--r--zuul/ansible/logconfig.py4
-rwxr-xr-xzuul/cmd/scheduler.py13
-rwxr-xr-xzuul/cmd/web.py14
-rw-r--r--zuul/configloader.py12
-rw-r--r--zuul/driver/auth/jwt.py17
-rw-r--r--zuul/driver/gerrit/gerritconnection.py25
-rw-r--r--zuul/driver/gerrit/gerritsource.py4
-rw-r--r--zuul/driver/github/githubconnection.py36
-rw-r--r--zuul/driver/github/githubsource.py2
-rw-r--r--zuul/driver/gitlab/gitlabsource.py2
-rw-r--r--zuul/driver/pagure/pagureconnection.py282
-rw-r--r--zuul/driver/pagure/paguresource.py2
-rw-r--r--zuul/driver/sql/__init__.py3
-rw-r--r--zuul/executor/server.py34
-rw-r--r--zuul/lib/ansible-config.conf1
-rw-r--r--zuul/lib/ansible.py61
-rw-r--r--zuul/lib/auth.py18
-rw-r--r--zuul/lib/capabilities.py51
-rw-r--r--zuul/lib/filecomments.py3
-rw-r--r--zuul/lib/yamlutil.py5
-rw-r--r--zuul/manager/__init__.py33
-rw-r--r--zuul/manager/dependent.py79
-rw-r--r--zuul/manager/serial.py37
-rw-r--r--zuul/manager/shared.py84
-rw-r--r--zuul/merger/merger.py91
-rw-r--r--zuul/model.py21
-rw-r--r--zuul/scheduler.py50
-rwxr-xr-xzuul/web/__init__.py26
-rw-r--r--zuul/zk.py19
117 files changed, 3258 insertions, 1064 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index d4a4151d5..913d34965 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -99,12 +99,10 @@
# because that is where the docker-compose file is located.
- job:
name: zuul-quick-start
+ parent: opendev-buildset-registry-consumer
description: Run the commands in the Zuul quick-start documentation.
run: playbooks/quick-start/run.yaml
post-run: playbooks/quick-start/post.yaml
- requires:
- - zuul-container-image
- - nodepool-container-image
required-projects:
- zuul/zuul
@@ -132,8 +130,8 @@
allowed-projects: zuul/zuul
timeout: 2700 # 45 minutes
requires:
- - python-builder-container-image
- - python-base-container-image
+ - python-builder-3.7-container-image
+ - python-base-3.7-container-image
provides: zuul-container-image
vars: &zuul_image_vars
docker_images:
@@ -162,8 +160,8 @@
description: Build Docker images and upload to Docker Hub.
allowed-projects: zuul/zuul
requires:
- - python-builder-container-image
- - python-base-container-image
+ - python-builder-3.7-container-image
+ - python-base-3.7-container-image
provides: zuul-container-image
secrets:
name: docker_credentials
@@ -203,7 +201,9 @@
jobs:
- zuul-build-image
- zuul-tox-docs
- - tox-pep8
+ - tox-linters:
+ vars:
+ tox_install_bindep: false
- tox-py35:
irrelevant-files:
- zuul/cmd/migrate.py
@@ -246,6 +246,7 @@
- zuul-tox-remote:
timeout: 2700 # 45 minutes
- zuul-quick-start:
+ requires: nodepool-container-image
dependencies: zuul-build-image
- nodepool-zuul-functional:
voting: false
@@ -259,7 +260,9 @@
jobs:
- zuul-upload-image
- zuul-tox-docs
- - tox-pep8
+ - tox-linters:
+ vars:
+ tox_install_bindep: false
- tox-py35:
irrelevant-files:
- zuul/cmd/migrate.py
@@ -301,6 +304,7 @@
- zuul-tox-remote:
timeout: 2700 # 45 minutes
- zuul-quick-start:
+ requires: nodepool-container-image
dependencies: zuul-upload-image
- zuul-build-python-release
- build-javascript-content-tarball:
diff --git a/Dockerfile b/Dockerfile
index 78b68512b..e1a30d5b3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM docker.io/opendevorg/python-builder as builder
+FROM docker.io/opendevorg/python-builder:3.7 as builder
# Optional location of Zuul API endpoint.
ARG REACT_APP_ZUUL_API
@@ -39,7 +39,7 @@ RUN mkdir /tmp/openshift-install \
&& echo $OPENSHIFT_SHA /tmp/openshift-install/openshift-client.tgz | sha256sum --check \
&& tar xvfz openshift-client.tgz --strip-components=1 -C /tmp/openshift-install
-FROM docker.io/opendevorg/python-base as zuul
+FROM docker.io/opendevorg/python-base:3.7 as zuul
COPY --from=builder /output/ /output
RUN /output/install-from-bindep \
@@ -55,6 +55,15 @@ COPY --from=builder /usr/local/lib/zuul/ /usr/local/lib/zuul
COPY --from=builder /tmp/openshift-install/kubectl /usr/local/bin/kubectl
COPY --from=builder /tmp/openshift-install/oc /usr/local/bin/oc
+# https://podman.io/getting-started/installation.html
+COPY tools/2472D6D0.asc /etc/apt/trusted.gpg.d/kubic.asc
+RUN echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list \
+ && apt-get update \
+ && apt-get install -y \
+ skopeo \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
CMD ["/usr/local/bin/zuul-executor", "-f"]
FROM zuul as zuul-fingergw
diff --git a/bindep.txt b/bindep.txt
index 3248cbaab..5c1641660 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -26,10 +26,15 @@ libssl1.1 [platform:debian platform:ubuntu-bionic]
libssl1.0.0 [platform:ubuntu-xenial]
libffi-dev [compile test platform:dpkg platform:apk]
libffi [platform:apk]
-gmp [platform:apk]
-procps [platform:apk]
-libffi6 [platform:dpkg]
+libffi7 [platform:ubuntu-focal]
+libffi6 [platform:dpkg !platform:ubuntu-focal]
libffi-devel [compile test platform:rpm]
+libyaml-0-2 [platform:dpkg platform:suse]
+libyaml [platform:redhat]
+libyaml-dev [platform:dpkg compile test]
+libyaml-devel [platform:rpm compile test]
+gmp [platform:apk]
+procps [platform:apk platform:dpkg]
python3-dev [compile test platform:dpkg platform:apk]
python3-devel [compile test platform:rpm]
python3.7 [test platform:ubuntu-bionic]
@@ -50,8 +55,7 @@ git
rsync
curl
gettext [platform:apk]
-unzip [platform:apk]
-unzip [platform:rpm]
+unzip
bzip2
openssh-client [platform:debian platform:apk]
openssh [platform:rpm]
diff --git a/doc/source/discussion/components.rst b/doc/source/discussion/components.rst
index 5d93bd932..ba36c89c6 100644
--- a/doc/source/discussion/components.rst
+++ b/doc/source/discussion/components.rst
@@ -64,7 +64,7 @@ TCP port 4730 by default.
The Zuul scheduler communicates with Nodepool via the ZooKeeper
protocol. Nodepool requires an external ZooKeeper cluster, and the
Zuul scheduler needs to be able to connect to the hosts in that
-cluster on TCP port 2181.
+cluster on TCP port 2181 or 2281.
Both the Nodepool launchers and Zuul executors need to be able to
communicate with the hosts which nodepool provides. If these are on
@@ -193,6 +193,17 @@ The following sections of ``zuul.conf`` are used by all Zuul components:
The ZooKeeper session timeout, in seconds.
+ .. attr:: tls_cert
+
+ If using TLS, the path to the PEM encoded certificate file.
+
+ .. attr:: tls_key
+
+ If using TLS, the path to the PEM encoded key file.
+
+ .. attr:: tls_ca
+
+ If using TLS, the path to the PEM encoded CA certificate file.
.. _scheduler:
diff --git a/doc/source/discussion/github-checks-api.rst b/doc/source/discussion/github-checks-api.rst
new file mode 100644
index 000000000..73788a5d2
--- /dev/null
+++ b/doc/source/discussion/github-checks-api.rst
@@ -0,0 +1,170 @@
+:title: Github Checks API
+
+Github Checks API
+=================
+
+Using the `Github Checks API`_ to report job results back to a PR provides
+some additional features compared to the status API like file comments and
+custom actions. The latter one could be used to e.g. cancel a running
+build.
+
+Design decisions
+-----------------
+
+The github checks API consists mainly of two entities: `Check Suites`_ and
+`Check Runs`_. Check suites are a collection of check runs for a specific
+commit and summarize their status and conclusion.
+
+Following this description, one might think that the check suite is a
+perfect mapping for a pipeline execution in zuul and a check run could map
+to a single job execution that is part of the pipeline run. Unfortunately,
+there are a few restrictions that don't allow this kind of mapping.
+
+First of all, check suites are completely managed by Github. Apart from
+creating a check suite for a commit SHA, we can't do anything with it.
+The current status, duration and the conclusion are all calculated and
+set by Github automatically whenever an included check run is updated.
+
+There can only be one check suite per commit sha per app. Thus, even if
+we could update the check suite, we wouldn't be able to create one check
+suite for each pipeline, e.g. check and gate.
+
+When configuring the branch protection in Github, only a check run can
+be selected as required status check. Having each job as a dedicated
+check run would result in a huge list of status checks one would have to
+enable to make the branch protection work. Additionally, we would then
+loose some of Zuul's features like non-voting jobs and it would break
+Zuul's gating capabilities as they are working on a pipeline level, not on
+a job level.
+
+Zuul can only report the whole buildset, but no individual jobs. With
+that we wouldn't be able to update individual check runs on a job level.
+
+Having said the above, the only possible integration of the checks API is
+on a pipeline level, so each pipeline execution maps to a check run in
+Github.
+
+Behaviour in Zuul
+-----------------
+
+Reporting
+~~~~~~~~~
+
+The Github reporter is able to report both a status
+:attr:`pipeline.<reporter>.<github source>.status` or a check
+:attr:`pipeline.<reporter>.<github source>.check`. While it's possible to
+configure a Github reporter to report both, it's recommended to use only one.
+Reporting both might result in duplicated status check entries in the Github
+PR (the section below the comments).
+
+Trigger
+~~~~~~~
+
+The Github driver is able to trigger on a reported check
+(:value:`pipeline.trigger.<github source>.event.check_run`) similar to a
+reported status (:value:`pipeline.trigger.<github source>.action.status`).
+
+Requirements
+~~~~~~~~~~~~
+
+While trigger and reporter differentiates between status and check, the Github
+driver does not differentiate between them when it comes to pipeline
+requirements. This is mainly because Github also doesn't differentiate between
+both in terms of branch protection and `status checks`_.
+
+Actions / Events
+----------------
+
+Github provides a set of default actions for check suites and check runs.
+Those actions are available as buttons in the Github UI. Clicking on those
+buttons will emit webhook events which will be handled by Zuul.
+
+These actions are only available on failed check runs / check suites. So
+far, a running or successful check suite / check run does not provide any
+action from Github side.
+
+Available actions are:
+
+Re-run all checks
+ Github emits a webhook event with type ``check_suite`` and action
+ ``rerequested`` that is meant to re-run all check-runs contained in this
+ check suite. Github does not provide the list of check-runs in that case,
+ so it's up to the Github app what should run.
+
+Re-run failed checks
+ Github emits a webhook event with type ``check_run`` and action
+ ``rerequested`` for each failed check run contained in this suite.
+
+Re-run
+ Github emits a webhook event with type ``check_run`` and action
+ ``rerequested`` for the specific check run.
+
+Zuul will handle all events except for the `Re-run all checks` event as
+this is not suitable for the Zuul workflow as it doesn't make sense to
+trigger all pipelines to run simultaniously.
+
+The drawback here is, that we are not able to customize those events in Github.
+Github will always say "You have successfully requested ..." although we aren't
+listening to the event at all. Therefore, it might be a solution to handle the
+`Re-run all checks` event in Zuul similar to `Re-run failed checks` just to
+not do anything while Github makes the user believe an action was really
+triggered.
+
+
+File comments (annotations)
+---------------------------
+
+Check runs can be used to post file comments directly in the files of the PR.
+Those are similar to user comments, but must provide some more information.
+
+Zuul jobs can already return file comments via ``zuul_return``
+(see: :ref:`return_values`). We can simply use this return value, build the
+necessary annotations (how Github calls it) from it and attach them to the
+check run.
+
+
+Custom actions
+~~~~~~~~~~~~~~
+
+Check runs can provide some custom actions which will result in additional
+buttons being available in the Github UI for this specific check run.
+Clicking on such a button will emit a webhook event with type ``check_run``
+and action ``requested_action`` and will additionally contain the id/name of
+the requested action which we can define when creating the action on the
+check run.
+
+We could use these custom actions to provide some "Re-run" action on a
+running check run (which might otherwise be stuck in case a check run update
+fails) or to abort a check run directly from the Github UI.
+
+
+Restrictions and Recommendations
+--------------------------------
+
+Although both the checks API and the status API can be activated for a
+Github reporter at the same time, it's not recommmended to do so as this might
+result in multiple status checks to be reported to the PR for the same pipeline
+execution (which would result in duplicated entries in the status section below
+the comments of a PR).
+
+In case the update on a check run fails (e.g. request timeout when reporting
+success or failure to Github), the check run will stay in status "in_progess"
+and there will be no way to re-run the check run via the Github UI as the
+predefined actions are only available on failed check runs.
+Thus, it's recommended to configure a
+:value:`pipeline.trigger.<github source>.action.comment` trigger on the
+pipeline to still be able to trigger re-run of the stuck check run via e.g.
+"recheck".
+
+The check suite will only list check runs that were reported by Zuul. If
+the requirements for a certain pipeline are not met and it is not run, the
+check run for this pipeline won't be listed in the check suite. However,
+this does not affect the required status checks. If the check run is enabled
+as required, Github will still show it in the list of required status checks
+- even if it didn't run yet - just not in the check suite.
+
+
+.. _Github Checks API: https://developer.github.com/v3/checks/
+.. _Check Suites: https://developer.github.com/v3/checks/suites/
+.. _Check Runs: https://developer.github.com/v3/checks/runs/
+.. _status checks: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-status-checks#types-of-status-checks-on-github
diff --git a/doc/source/discussion/index.rst b/doc/source/discussion/index.rst
index a02b5ff97..efa59d6a9 100644
--- a/doc/source/discussion/index.rst
+++ b/doc/source/discussion/index.rst
@@ -16,3 +16,4 @@ configure it to meet your needs.
gating
encryption
tenant-scoped-rest-api
+ github-checks-api
diff --git a/doc/source/examples/docker-compose.yaml b/doc/source/examples/docker-compose.yaml
index 839e9cf9e..ccb85ca1c 100644
--- a/doc/source/examples/docker-compose.yaml
+++ b/doc/source/examples/docker-compose.yaml
@@ -27,11 +27,17 @@ services:
- "sshkey:/var/ssh:z"
- "nodessh:/var/node:z"
- "./playbooks/:/var/playbooks/:z"
+ - "certs:/var/certs:z"
+ - "../../../tools/:/var/zuul-tools/:z"
# NOTE(pabelanger): Be sure to update this line each time we change the
# default version of ansible for Zuul.
command: "/usr/local/lib/zuul/ansible/2.8/bin/ansible-playbook /var/playbooks/setup.yaml"
zk:
image: zookeeper
+ hostname: examples_zk_1.examples_default
+ volumes:
+ - "certs:/var/certs:z"
+ - "./zoo.cfg:/conf/zoo.cfg:z"
mysql:
image: mariadb
environment:
@@ -59,6 +65,7 @@ services:
- "./etc_zuul/:/etc/zuul/:z"
- "./playbooks/:/var/playbooks/:z"
- "sshkey:/var/ssh:z"
+ - "certs:/var/certs:z"
web:
command: "sh -c '/var/playbooks/wait-to-start-gearman.sh && zuul-web -f'"
depends_on:
@@ -72,6 +79,7 @@ services:
volumes:
- "./etc_zuul/:/etc/zuul/:z"
- "./playbooks/:/var/playbooks/:z"
+ - "certs:/var/certs:z"
executor:
privileged: true
environment:
@@ -103,6 +111,7 @@ services:
image: zuul/nodepool-launcher
volumes:
- "./etc_nodepool/:/etc/nodepool/:z"
+ - "certs:/var/certs:z"
ports:
- "8022:8022"
logs:
@@ -122,3 +131,4 @@ volumes:
sshkey:
nodessh:
logs:
+ certs:
diff --git a/doc/source/examples/etc_nodepool/nodepool.yaml b/doc/source/examples/etc_nodepool/nodepool.yaml
index fd4252daa..e5d292b7c 100644
--- a/doc/source/examples/etc_nodepool/nodepool.yaml
+++ b/doc/source/examples/etc_nodepool/nodepool.yaml
@@ -1,5 +1,10 @@
zookeeper-servers:
- host: zk
+ port: 2281
+zookeeper-tls:
+ cert: /var/certs/certs/client.pem
+ key: /var/certs/keys/clientkey.pem
+ ca: /var/certs/certs/cacert.pem
labels:
- name: ubuntu-bionic
diff --git a/doc/source/examples/etc_zuul/zuul.conf b/doc/source/examples/etc_zuul/zuul.conf
index 8b8c34c23..518fd57f3 100644
--- a/doc/source/examples/etc_zuul/zuul.conf
+++ b/doc/source/examples/etc_zuul/zuul.conf
@@ -5,7 +5,10 @@ server=scheduler
start=true
[zookeeper]
-hosts=zk
+hosts=zk:2281
+tls_cert=/var/certs/certs/client.pem
+tls_key=/var/certs/keys/clientkey.pem
+tls_ca=/var/certs/certs/cacert.pem
[scheduler]
tenant_config=/etc/zuul/main.yaml
diff --git a/doc/source/examples/pipelines/gerrit-reference-pipelines.yaml b/doc/source/examples/pipelines/gerrit-reference-pipelines.yaml
index a20647a03..4e0fccaf5 100644
--- a/doc/source/examples/pipelines/gerrit-reference-pipelines.yaml
+++ b/doc/source/examples/pipelines/gerrit-reference-pipelines.yaml
@@ -100,6 +100,24 @@
sqlreporter:
- pipeline:
+ name: deploy
+ description: |
+ This pipeline runs jobs that operate after each change is merged
+ in order to deploy to production.
+ manager: serial
+ precedence: high
+ post-review: True
+ trigger:
+ gerrit:
+ - event: change-merged
+ success:
+ gerrit: {}
+ sqlreporter:
+ failure:
+ gerrit: {}
+ sqlreporter:
+
+- pipeline:
name: release
description: |
When a commit is tagged as a release, this pipeline runs jobs
diff --git a/doc/source/examples/playbooks/setup.yaml b/doc/source/examples/playbooks/setup.yaml
index d6e35365c..c17a971d8 100644
--- a/doc/source/examples/playbooks/setup.yaml
+++ b/doc/source/examples/playbooks/setup.yaml
@@ -1,6 +1,10 @@
- hosts: localhost
gather_facts: false
tasks:
+ - name: Generate ZooKeeper certs
+ shell: |
+ /var/zuul-tools/zk-ca.sh /var/certs examples_zk_1.examples_default
+ chmod -R a+rX /var/certs
- name: Wait for Gerrit to start
wait_for:
host: gerrit
diff --git a/doc/source/examples/zoo.cfg b/doc/source/examples/zoo.cfg
new file mode 100644
index 000000000..4d4fcc3ea
--- /dev/null
+++ b/doc/source/examples/zoo.cfg
@@ -0,0 +1,15 @@
+dataDir=/data
+dataLogDir=/datalog
+tickTime=2000
+initLimit=5
+syncLimit=2
+autopurge.snapRetainCount=3
+autopurge.purgeInterval=0
+maxClientCnxns=60
+standaloneEnabled=true
+admin.enableServer=true
+server.1=examples_zk_1.examples_default:2888:3888
+serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
+secureClientPort=2281
+ssl.keyStore.location=/var/certs/keystores/examples_zk_1.examples_default.pem
+ssl.trustStore.location=/var/certs/certs/cacert.pem
diff --git a/doc/source/howtos/admin.rst b/doc/source/howtos/admin.rst
index a829d2528..9fd4933f2 100644
--- a/doc/source/howtos/admin.rst
+++ b/doc/source/howtos/admin.rst
@@ -7,3 +7,4 @@ Admin How-to Guides
installation
zuul-from-scratch
troubleshooting
+ zookeeper
diff --git a/doc/source/howtos/zookeeper.rst b/doc/source/howtos/zookeeper.rst
new file mode 100644
index 000000000..ef127a599
--- /dev/null
+++ b/doc/source/howtos/zookeeper.rst
@@ -0,0 +1,118 @@
+ZooKeeper Administration
+========================
+
+This section will cover some basic tasks and recommendations when
+setting up ZooKeeper for use with Zuul. A complete tutorial for
+ZooKeeper is out of scope for this documentation.
+
+Configuration
+-------------
+
+The following general configuration setting in
+``/etc/zookeeper/zoo.cfg`` is recommended:
+
+.. code-block::
+
+ autopurge.purgeInterval=6
+
+This instructs ZooKeeper to purge old snapshots every 6 hours. This
+will avoid filling the disk.
+
+Encrypted Connections
+---------------------
+
+ZooKeeper version 3.5.1 or greater is required for TLS support.
+ZooKeeper performs hostname validation for all ZooKeeper servers
+("quorum members"), therefore each member of the ZooKeeper cluster
+should have its own certificate. This does not apply to clients which
+may share a certificate.
+
+ZooKeeper performs certificate validation on all connections (server
+and client). If you use a private Certificate Authority (CA) (which
+is generally recommended and discussed below), then these TLS
+certificates not only serve to encrypt traffic, but also to
+authenticate and authorize clients to the cluster. Only clients with
+certificates authorized by a CA explicitly trusted by your ZooKeeper
+installation will be able to connect.
+
+.. note:: The instructions below direct you to sign certificates with
+ a CA that you create specifically for Zuul's ZooKeeper
+ cluster. If you use a CA you share with other users in your
+ organization, any certificate signed by that CA will be able
+ to connect to your ZooKeeper cluster. In this case, you may
+ need to take additional steps such as network isolation to
+ protect your ZooKeeper cluster. These are beyond the scope
+ of this document.
+
+The ``tools/zk-ca.sh`` script in the Zuul source code repository can
+be used to quickly and easily generate self-signed certificates for
+all ZooKeeper cluster members and clients.
+
+Make a directory for it to store the certificates and CA data, and run
+it once for each client:
+
+.. code-block::
+
+ mkdir /etc/zookeeper/ca
+ tools/zk-ca.sh /etc/zookeeper/ca zookeeper1.example.com
+ tools/zk-ca.sh /etc/zookeeper/ca zookeeper2.example.com
+ tools/zk-ca.sh /etc/zookeeper/ca zookeeper3.example.com
+
+Add the following to ``/etc/zookeeper/zoo.cfg``:
+
+.. code-block::
+
+ # Necessary for TLS support
+ serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
+
+ # Client TLS configuration
+ secureClientPort=2281
+ ssl.keyStore.location=/etc/zookeeper/ca/keystores/zookeeper1.example.com.jks
+ ssl.keyStore.password=keystorepassword
+ ssl.trustStore.location=/etc/zookeeper/ca/certs/cacert.pem
+
+ # Server TLS configuration
+ sslQuorum=true
+ ssl.quorum.keyStore.location=/etc/zookeeper/ca/keystores/zookeeper1.example.com.jks
+ ssl.quorum.keyStore.password=keystorepassword
+ ssl.quorum.trustStore.location=/etc/zookeeper/ca/certs/cacert.pem
+
+Change the name of the certificate filenames as appropriate for the
+host (e.g., ``zookeeper1.example.com.jks``). Note that the keystore
+password ``keystorepassword``, which is set by the ``zk-ca.sh``
+script, does not need to be changed as long as file permissions
+provide sufficient protection. The password is present because many
+Java utilities misbehave when interacting with keystores with empty or
+missing passwords.
+
+In order to disable plaintext connections, ensure that the
+``clientPort`` option does not appear in ``zoo.cfg``. Use the new
+method of specifying Zookeeper quorum servers, which looks like this:
+
+.. code-block::
+
+ server.1=zookeeper1.example.com:2888:3888
+ server.2=zookeeper2.example.com:2888:3888
+ server.3=zookeeper3.example.com:2888:3888
+
+This format normally includes ``;2181`` at the end of each line,
+signifying that the server should listen on port 2181 for plaintext
+client connections (this is equivalent to the ``clientPort`` option).
+Omit it to disable plaintext connections. The earlier addition of
+``secureClientPort`` to the config file instructs ZooKeeper to listen
+for encrypted connections on port 2281.
+
+Be sure to specify port 2281 rather than the standard 2181 in the
+:attr:`zookeeper.hosts` setting in ``zuul.conf``.
+
+Finally, add the :attr:`zookeeper.tls_cert`,
+:attr:`zookeeper.tls_key`, and :attr:`zookeeper.tls_ca` options. Your
+``zuul.conf`` file should look like:
+
+.. code-block::
+
+ [zookeeper]
+ hosts=zookeeper1.example.com:2281,zookeeper2.example.com:2281,zookeeper3.example.com:2281
+ tls_cert=/etc/zookeeper/ca/certs/client.pem
+ tls_key=/etc/zookeeper/ca/keys/clientkey.pem
+ tls_ca=/etc/zookeeper/ca/certs/cacert.pem
diff --git a/doc/source/reference/developer/specs/kubernetes-operator.rst b/doc/source/reference/developer/specs/kubernetes-operator.rst
index b49f4e0f6..18e3e6cc1 100644
--- a/doc/source/reference/developer/specs/kubernetes-operator.rst
+++ b/doc/source/reference/developer/specs/kubernetes-operator.rst
@@ -191,7 +191,7 @@ needs should be in ConfigMaps and referenced.
config: nodepoolYamlConfig
builder:
config: nodepoolYamlConfig
- external_config:
+ externalConfig:
openstack:
secretName: cloudsYaml
kubernetes:
@@ -343,6 +343,46 @@ pods that need them.
keyfile:
secretName: mqttSecrets
+Executor job volume
+-------------------
+
+To manage the executor job volumes, the CR also accepts a list of volumes
+to be bind mounted in the job bubblewrap contexts:
+
+::
+
+ name: Text
+ context: <trusted | untrusted>
+ access: <ro | rw>
+ path: /path
+ volume: Kubernetes.Volume
+
+
+For example, to expose a GCP authdaemon token, the Zuul CR can be defined as
+
+::
+
+ apiVersion: zuul-ci.org/v1alpha1
+ kind: Zuul
+ spec:
+ ...
+ jobVolumes:
+ - context: trusted
+ access: ro
+ path: /authdaemon/token
+ volume:
+ name: gcp-auth
+ hostPath:
+ path: /var/authdaemon/executor
+ type: DirectoryOrCreate
+
+Which would result in a new executor mountpath along with this zuul.conf change:
+
+::
+
+ trusted_ro_paths=/authdaemon/token
+
+
Logging
-------
diff --git a/doc/source/reference/drivers/github.rst b/doc/source/reference/drivers/github.rst
index 474c8ac82..c2c085eca 100644
--- a/doc/source/reference/drivers/github.rst
+++ b/doc/source/reference/drivers/github.rst
@@ -499,6 +499,21 @@ enqueued into the pipeline.
request. The syntax is ``user:status:value``. This can also
be a regular expression.
+ Zuul does not differentiate between a status reported via
+ status API or via checks API (which is also how Github behaves
+ in terms of branch protection and `status checks`__).
+ Thus, the status could be reported by a
+ :attr:`pipeline.<reporter>.<github source>.status` or a
+ :attr:`pipeline.<reporter>.<github source>.check`.
+
+ When a status is reported via the status API, Github will add
+ a ``[bot]`` to the name of the app that reported the status,
+ resulting in something like ``user[bot]:status:value``. For a
+ status reported via the checks API, the app's slug will be
+ used as is.
+
+ .. __: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-status-checks#types-of-status-checks-on-github
+
.. attr:: label
A string value indicating that the pull request must have the
diff --git a/doc/source/reference/drivers/pagure.rst b/doc/source/reference/drivers/pagure.rst
index 320cf33f8..7845ae79a 100644
--- a/doc/source/reference/drivers/pagure.rst
+++ b/doc/source/reference/drivers/pagure.rst
@@ -12,12 +12,30 @@ installations of Pagure.
Configure Pagure
----------------
-Pagure's project owner must give project Admin access to the Pagure's user
-that own the API key defined in the Zuul configuration. The API key
-must at least have the ``Modify an existing project`` access.
-
-Furthermore Project owner must set the web hook target url in project settings
-such as: ``http://<zuul-web>/zuul/api/connection/<conn-name>/payload``
+The user's API token configured in zuul.conf must have the following
+ACL rights:
+
+- "Merge a pull-request" set to on (optional, only for gating)
+- "Flag a pull-request" set to on
+- "Comment on a pull-request" set to on
+- "Modify an existing project" set to on
+
+Each project to be integrated with Zuul needs:
+
+- "Web hook target" set to
+ http://<zuul-web>/zuul/api/connection/<conn-name>/payload
+- "Pull requests" set to on
+- "Open metadata access to all" set to off (optional, expected if approval
+ based on PR a metadata tag)
+- "Minimum score to merge pull-request" set to the same value than
+ the score requierement (optional, expected if score requierement is
+ defined in a pipeline)
+
+Furthermore, the user must be added as a project collaborator
+(**ticket** access level) to be able to read the project's
+webhook token. This token is used to validate the webhook's payload.
+However, if Zuul is configured to merge pull requests, then the
+access level must be **commit**.
Connection Configuration
------------------------
@@ -64,6 +82,24 @@ The supported options in ``zuul.conf`` connections are:
Path to the Pagure Git repositories. Used to clone.
+ .. attr:: app_name
+ :default: Zuul
+
+ Display name that will appear as the application name in front
+ of each CI status flag.
+
+ .. attr:: source_whitelist
+ :default: ''
+
+ A comma separated list of source IP addresses from which webhook
+ calls are whitelisted. If the source is not whitelisted, then the
+ call payload's signature is verified using the project webhook
+ token. Admin access to the project is required by Zuul to read
+ the token. Whitelisting a source of hook calls allows Zuul to
+ react to events without any authorization. This setting should
+ not be used in production.
+
+
Trigger Configuration
---------------------
Pagure webhook events can be configured as triggers.
diff --git a/doc/source/reference/glossary.rst b/doc/source/reference/glossary.rst
index 994fdf2fd..536c80034 100644
--- a/doc/source/reference/glossary.rst
+++ b/doc/source/reference/glossary.rst
@@ -66,7 +66,10 @@ Glossary
yet seen any human review, so care must be taken in selecting
the kinds of jobs to run, and what resources will be available
to them in order to avoid misuse of the system or credential
- compromise.
+ compromise. It usually has an :value:`independent
+ <pipeline.manager.independent>` pipeline manager since the final
+ sequence of changes to merge is not generally known at the time
+ of upload.
config-project
@@ -94,6 +97,20 @@ Glossary
incorporate any cross-project dependencies into the context
for the change declaring that dependency relationship.
+ deploy
+
+ By convention, the name of a continuous-deployment pipeline.
+ Such a pipeline typically interacts with production systems
+ rather than ephemeral test nodes. By triggering on merge events
+ the results of deployment can be reported back to the
+ originating change. The :value:`serial
+ <pipeline.manager.serial>` pipeline manager is recommended if
+ multiple repositories are involved and only some jobs (based on
+ file matchers) will be run for each change. If a single repo is
+ involved and all deployment jobs run on every change merged,
+ then :value:`supercedent <pipeline.manager.supercedent>` may be
+ a better fit.
+
executor
The component of Zuul responsible for executing a sandboxed
@@ -224,6 +241,21 @@ Glossary
of reusing the same sets of jobs in the same pipelines across
multiple projects.
+ promote
+
+ By convention, the name of a pipeline which uploads previously
+ built artifacts. These artifacts should be constructed in a
+ :term:`gate` pipeline and uploaded to a temporary location.
+ When all of the jobs in the gate pipeline succeed, the change
+ will be merged and may then be enqueued into a promote pipeline.
+ Jobs running in this pipeline do so with the understanding that
+ since the change merged as it was tested in the gate, any
+ artifacts created at that time are now safe to promote to
+ production. It is a good choice to use a :value:`supercedent
+ <pipeline.manager.supercedent>` pipeline manager so that if many
+ changes merge in rapid sequence, Zuul may skip promoting all but
+ the latest artifact to production.
+
provided artifact
A named artifact which builds of a job are expected to
@@ -232,6 +264,30 @@ Glossary
same name, allowing these relationships to be defined
independent of the specific jobs which provide them.
+ post
+
+ By convention, the name of a pipeline which runs after a branch
+ is updated. By triggering on a branch update (rather than a
+ merge) event, jobs in this pipeline may run with the final git
+ state after the merge (including any merge commits generated by
+ the upstream code review system). This is important when
+ building some artifacts in order that the exact commit ids are
+ present in the git repo. The downside to this approach is that
+ jobs in this pipeline run without any connection to the
+ underlying changes which created the commits. If only the
+ latest updates to a branch matter, then the :value:`supercedent
+ <pipeline.manager.supercedent>` pipeline manager is recommended;
+ otherwise :value:`independent <pipeline.manager.independent>`
+ may be a better choice. See also :term:`tag` and
+ :term:`release`.
+
+ release
+
+ By convention, the name of a pipeline which runs after a
+ release-formatted tag is updated. Other than the matching ref,
+ this is typically constructed the same as a :term:`post`
+ pipeline. See also :term:`tag`.
+
reporter
A reporter is a :ref:`pipeline attribute <reporters>` which
@@ -276,6 +332,13 @@ Glossary
queue items to no longer include it in their respective
contexts.
+ tag
+
+ By convention, the name of a pipeline which runs after a tag is
+ updated. Other than the matching ref, this is typically
+ constructed the same as a :term:`post` pipeline. See also
+ :term:`release`.
+
tenant
A set of projects on which Zuul should operate. Configuration
diff --git a/doc/source/reference/governance.rst b/doc/source/reference/governance.rst
index 69e6cffc4..64f5f4f3d 100644
--- a/doc/source/reference/governance.rst
+++ b/doc/source/reference/governance.rst
@@ -69,7 +69,6 @@ Name Freenode Nick
====================== =============
Clark Boylan clarkb
Clint Byrum SpamapS
-David Shrewsbury Shrews
Ian Wienand ianw
James E. Blair corvus
Jens Harbott frickler
@@ -129,6 +128,6 @@ Current Zuul-Jobs Maintainers (in addition to Zuul Maintainers):
Name Freenode Nick
====================== =============
Andreas Jaeger AJaeger
-David Moreau Simard dmsimard
Mohammed Naser mnaser
+Albin Vass avass
====================== =============
diff --git a/doc/source/reference/job_def.rst b/doc/source/reference/job_def.rst
index b5fe63627..d90dbfc62 100644
--- a/doc/source/reference/job_def.rst
+++ b/doc/source/reference/job_def.rst
@@ -703,7 +703,7 @@ Here is an example of two job definitions:
or *soft* dependency. A *hard* dependency will cause an
error if the specified job is not run. That is, if job B
depends on job A, but job A is not run for any reason (for
- example, it containes a file matcher which does not match),
+ example, it contains a file matcher which does not match),
then Zuul will not run any jobs and report an error. A
*soft* dependency will simply be ignored if the dependent job
is not run.
diff --git a/doc/source/reference/jobs.rst b/doc/source/reference/jobs.rst
index b39629604..bd4ee2d11 100644
--- a/doc/source/reference/jobs.rst
+++ b/doc/source/reference/jobs.rst
@@ -881,10 +881,12 @@ change, set the **zuul.file_comments** value. For example:
path/to/file.py:
- line: 42
message: "Line too long"
+ level: info
- line: 82
message: "Line too short"
- line: 119
message: "This block is indented too far."
+ level: warning
range:
start_line: 117
start_character: 0
@@ -893,7 +895,8 @@ change, set the **zuul.file_comments** value. For example:
Not all reporters currently support line comments (or all of the
features of line comments); in these cases, reporters will simply
-ignore this data.
+ignore this data. The ``level`` is optional, but if provided must
+be one of ``info``, ``warning``, ``error``.
Zuul will attempt to automatically translate the supplied line numbers
to the corresponding lines in the original change as written (they may
diff --git a/doc/source/reference/pipeline_def.rst b/doc/source/reference/pipeline_def.rst
index 8b73282e2..e2c2d9587 100644
--- a/doc/source/reference/pipeline_def.rst
+++ b/doc/source/reference/pipeline_def.rst
@@ -63,7 +63,18 @@ success, the pipeline reports back to Gerrit with ``Verified`` vote of
.. attr:: manager
:required:
- There are three schemes for managing pipelines:
+ There are several schemes for managing pipelines. The following
+ table summarizes their features; each is described in detail
+ below.
+
+ =========== ============================= ============ ===== ============= =========
+ Manager Use Case Dependencies Merge Shared Queues Window
+ =========== ============================= ============ ===== ============= =========
+ Independent :term:`check`, :term:`post` No No No Unlimited
+ Dependent :term:`gate` Yes Yes Yes Variable
+ Serial :term:`deploy` No No Yes 1
+ Supercedent :term:`post`, :term:`promote` No No Project-ref 1
+ =========== ============================= ============ ===== ============= =========
.. value:: independent
@@ -107,6 +118,22 @@ success, the pipeline reports back to Gerrit with ``Verified`` vote of
For more detail on the theory and operation of Zuul's
dependent pipeline manager, see: :doc:`/discussion/gating`.
+ .. value:: serial
+
+ This pipeline manager supports shared queues (like dependent
+ pipelines) but only one item in each shared queue is
+ processed at a time.
+
+ This may be useful for post-merge pipelines which perform
+ partial production deployments (i.e., there are jobs with
+ file matchers which only deploy to affected parts of the
+ system). In such a case it is important for every change to
+ be processed, but they must still be processed one at a time
+ in order to ensure that the production system is not
+ inadvertently regressed. Support for shared queues ensures
+ that if multiple projects are involved deployment runs still
+ execute sequentially.
+
.. value:: supercedent
This is like an independent pipeline, in that every item is
@@ -124,11 +151,12 @@ success, the pipeline reports back to Gerrit with ``Verified`` vote of
these cases, build resources can be conserved by avoiding
building intermediate versions.
- .. note:: Since this pipeline filters intermediate buildsets using
- it in combination with file filters on jobs is dangerous.
- In this case jobs of in between buildsets can be
- unexpectedly skipped entirely. If file filters are needed
- the independent pipeline manager should be used.
+ .. note:: Since this pipeline filters intermediate buildsets,
+ using it in combination with file filters on jobs
+ is dangerous. In this case jobs of in-between
+ buildsets can be unexpectedly skipped entirely. If
+ file filters are needed, the ``independent`` or
+ ``serial`` pipeline managers should be used.
.. attr:: post-review
:default: false
diff --git a/doc/source/tutorials/quick-start.rst b/doc/source/tutorials/quick-start.rst
index e7c227518..e640ff261 100644
--- a/doc/source/tutorials/quick-start.rst
+++ b/doc/source/tutorials/quick-start.rst
@@ -530,6 +530,15 @@ output from the job's playbook.
Further Steps
-------------
+You now have a Zuul system up and running, congratulations!
+
+The Zuul community would love to hear about how you plan to use Zuul.
+Please take a few moments to fill out the `Zuul User Survey
+<https://www.surveymonkey.com/r/K2B2MWL>`_ to provide feedback and
+information around your deployment. All information is confidential
+to the OpenStack Foundation unless you designate that it can be
+public.
+
If you would like to make further changes to Zuul, its configuration
files are located in the ``zuul/doc/source/examples`` directory
and are bind-mounted into the running containers. You may edit them
diff --git a/playbooks/quick-start/run.yaml b/playbooks/quick-start/run.yaml
index 9ee18c3a7..b58b31c55 100644
--- a/playbooks/quick-start/run.yaml
+++ b/playbooks/quick-start/run.yaml
@@ -3,7 +3,7 @@
workspace: /tmp/quickstart-test
local: false
roles:
- - name: install-docker
+ - name: ensure-docker
use_upstream_docker: false
docker_compose_install: true
- use-buildset-registry
diff --git a/playbooks/release/pre.yaml b/playbooks/release/pre.yaml
index 02305e385..5f7f44bfe 100644
--- a/playbooks/release/pre.yaml
+++ b/playbooks/release/pre.yaml
@@ -1,5 +1,5 @@
- hosts: all
roles:
- - install-nodejs
- - role: install-yarn
+ - ensure-nodejs
+ - role: ensure-yarn
yarn_lock_file_path: "{{ zuul.project.src_dir }}/web/yarn.lock"
diff --git a/releasenotes/notes/ansible-27-deprecated-cd82a8a47a10a8c7.yaml b/releasenotes/notes/ansible-27-deprecated-cd82a8a47a10a8c7.yaml
new file mode 100644
index 000000000..d1f9e0c94
--- /dev/null
+++ b/releasenotes/notes/ansible-27-deprecated-cd82a8a47a10a8c7.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Ansible 2.7 is now deprecated since it only receives security updates
+ and will be end of life soon.
diff --git a/releasenotes/notes/file-comment-levels-9b26156addc3644f.yaml b/releasenotes/notes/file-comment-levels-9b26156addc3644f.yaml
new file mode 100644
index 000000000..ec5dff2bb
--- /dev/null
+++ b/releasenotes/notes/file-comment-levels-9b26156addc3644f.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ The annotation levels for the file comments reported via Github checks
+ API are now configurable in ``zuul_return``. Each file comment entry can
+ provide an optional ``level`` parameter ``[info|warning|error]`` that will
+ be picked up by the Github reporter.
+
+ For more details on how to provide file comments from Zuul, see the
+ documentation of the :ref:`return_values`.
diff --git a/releasenotes/notes/github-require-check-294d3f27da790fae.yaml b/releasenotes/notes/github-require-check-294d3f27da790fae.yaml
new file mode 100644
index 000000000..a01d59b20
--- /dev/null
+++ b/releasenotes/notes/github-require-check-294d3f27da790fae.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The status pipeline requirements of the Github driver
+ :attr:`pipeline.require.<github source>.status` now also matches
+ on statuses reported via the Github checks API.
diff --git a/releasenotes/notes/serial-manager-8f2dcf924c72effe.yaml b/releasenotes/notes/serial-manager-8f2dcf924c72effe.yaml
new file mode 100644
index 000000000..dd2a50ab2
--- /dev/null
+++ b/releasenotes/notes/serial-manager-8f2dcf924c72effe.yaml
@@ -0,0 +1,6 @@
+features:
+ - |
+ The :value:`pipeline.manager.serial` pipeline manager has been
+ added. It is designed to handle serialized deployment pipelines
+ where supercedent is unsuitable in the case that not all jobs run
+ on every merge.
diff --git a/releasenotes/notes/strip-encrypt-secret-87b06dc887312117.yaml b/releasenotes/notes/strip-encrypt-secret-87b06dc887312117.yaml
new file mode 100644
index 000000000..26a0caf95
--- /dev/null
+++ b/releasenotes/notes/strip-encrypt-secret-87b06dc887312117.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ The default behavior of the ``tools/encrypt_secret.py``
+ helper script is now to strip incoming input of leading
+ and trailing whitespace. A new ``--no-strip`` option
+ has been added to support people with secrets that contain
+ valid leading or trailing whitespace.
diff --git a/releasenotes/notes/zookeeper-tls-2748002ff9afbf77.yaml b/releasenotes/notes/zookeeper-tls-2748002ff9afbf77.yaml
new file mode 100644
index 000000000..05c47c56c
--- /dev/null
+++ b/releasenotes/notes/zookeeper-tls-2748002ff9afbf77.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Zuul now supports a TLS secured connection to ZooKeeper.
+upgrade:
+ - |
+ Please configure your Zuul to use TLS secured connection. Running Zuul
+ with an unsecured connection to ZooKeeper is deprecated and will be
+ unsupported in a future release. See :attr:`zookeeper`
+ for details.
diff --git a/releasenotes/notes/zuul-ansible-environment-secrets-983e8ced908b294d.yaml b/releasenotes/notes/zuul-ansible-environment-secrets-983e8ced908b294d.yaml
new file mode 100644
index 000000000..8cf6ef8e4
--- /dev/null
+++ b/releasenotes/notes/zuul-ansible-environment-secrets-983e8ced908b294d.yaml
@@ -0,0 +1,5 @@
+---
+security:
+ - |
+ Zuul no longer adds environment variables starting with the ``ZUUL_`` prefix
+ to Ansible's environment, which could result in secrets being exposed.
diff --git a/requirements.txt b/requirements.txt
index 2aded3236..732aa35a8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@ pbr>=1.1.0
# Temporary fix for https://gitlab.com/python-devs/importlib_resources/issues/83
importlib-resources==1.0.2
# Early virtualenv 20 had bad file location assumptions
-virtualenv!=20.0.0,!=20.0.1
+virtualenv!=20.0.0,!=20.0.1,>20
github3.py>=1.1.0
PyYAML>=3.1.0
@@ -28,7 +28,7 @@ iso8601
psutil
fb-re2>=1.0.6
paho-mqtt
-cherrypy
+cherrypy==18.3.0  # last version to support cheroot below
ws4py
routes
pathspec
diff --git a/setup.cfg b/setup.cfg
index a552d76f9..3c3dbdd63 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -57,3 +57,5 @@ migrate=
zuul_base =
PyMySQL
psycopg2-binary
+ yappi
+ objgraph
diff --git a/tests/base.py b/tests/base.py
index 527bdf66c..6d369133d 100644
--- a/tests/base.py
+++ b/tests/base.py
@@ -30,6 +30,8 @@ import queue
import random
import re
from logging import Logger
+from queue import Queue
+from typing import Callable, Optional, Any, Iterable, Generator, List
import requests
import select
@@ -90,8 +92,7 @@ import zuul.configloader
from zuul.lib.config import get_default
from zuul.lib.logutil import get_annotated_logger
-FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
- 'fixtures')
+FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
KEEP_TEMPDIRS = bool(os.environ.get('KEEP_TEMPDIRS', False))
@@ -637,8 +638,11 @@ class FakeGerritChange(object):
path = os.path.join(self.upstream_root, self.project)
repo = git.Repo(path)
- repo.heads[self.branch].commit = \
- repo.commit(self.patchsets[-1]['revision'])
+
+ repo.head.reference = self.branch
+ zuul.merger.merger.reset_repo_to_head(repo)
+ repo.git.merge('-s', 'resolve', self.patchsets[-1]['ref'])
+ repo.heads[self.branch].commit = repo.head.commit
def setReported(self):
self.reported += 1
@@ -1231,19 +1235,33 @@ class FakePagurePullRequest(object):
return self._getPullRequestEvent(
'pull-request.tag.added', pull_data_field='pull_request')
- def getPullRequestStatusSetEvent(self, status):
+ def getPullRequestStatusSetEvent(self, status, username="zuul"):
self.addFlag(
- status, "https://url", "Build %s" % status)
+ status, "https://url", "Build %s" % status, username)
return self._getPullRequestEvent('pull-request.flag.added')
- def addFlag(self, status, url, comment, username="Pingou"):
+ def insertFlag(self, flag):
+ to_pop = None
+ for i, _flag in enumerate(self.flags):
+ if _flag['uid'] == flag['uid']:
+ to_pop = i
+ if to_pop is not None:
+ self.flags.pop(to_pop)
+ self.flags.insert(0, flag)
+
+ def addFlag(self, status, url, comment, username="zuul"):
+ flag_uid = "%s-%s-%s" % (username, self.number, self.project)
flag = {
- "username": username,
+ "username": "Zuul CI",
+ "user": {
+ "name": username
+ },
+ "uid": flag_uid[:32],
"comment": comment,
"status": status,
"url": url
}
- self.flags.insert(0, flag)
+ self.insertFlag(flag)
self._updateTimeStamp()
def editInitialComment(self, initial_comment):
@@ -1323,9 +1341,9 @@ class FakePagureAPIClient(pagureconnection.PagureAPIClient):
log = logging.getLogger("zuul.test.FakePagureAPIClient")
def __init__(self, baseurl, api_token, project,
- token_exp_date=None, pull_requests_db={}):
+ pull_requests_db={}):
super(FakePagureAPIClient, self).__init__(
- baseurl, api_token, project, token_exp_date)
+ baseurl, api_token, project)
self.session = None
self.pull_requests = pull_requests_db
self.return_post_error = None
@@ -1401,13 +1419,18 @@ class FakePagureAPIClient(pagureconnection.PagureAPIClient):
pr.is_merged = True
return {}, 200, "", "POST"
+ match = re.match(r'.+/api/0/-/whoami$', url)
+ if match:
+ return {"username": "zuul"}, 200, "", "POST"
+
if not params:
return self.gen_error("POST")
match = re.match(r'.+/api/0/(.+)/pull-request/(\d+)/flag$', url)
if match:
pr = self._get_pr(match)
- pr.flags.insert(0, params)
+ params['user'] = {"name": "zuul"}
+ pr.insertFlag(params)
match = re.match(r'.+/api/0/(.+)/pull-request/(\d+)/comment$', url)
if match:
@@ -1432,23 +1455,26 @@ class FakePagureConnection(pagureconnection.PagureConnection):
self.reports = []
self.rpcclient = rpcclient
self.cloneurl = self.upstream_root
- self.connectors = {}
-
- def _refresh_project_connectors(self, project):
- connector = self.connectors.setdefault(
- project, {'api_client': None, 'webhook_token': None})
- api_token_exp_date = int(time.time()) + 60 * 24 * 3600
- connector['api_client'] = FakePagureAPIClient(
- self.baseurl, "fake_api_token-%s" % project, project,
- token_exp_date=api_token_exp_date,
+
+ def get_project_api_client(self, project):
+ client = FakePagureAPIClient(
+ self.baseurl, None, project,
pull_requests_db=self.pull_requests)
- connector['webhook_token'] = "fake_webhook_token-%s" % project
- return connector
+ if not self.username:
+ self.set_my_username(client)
+ return client
- def emitEvent(self, event, use_zuulweb=False, project=None):
+ def get_project_webhook_token(self, project):
+ return 'fake_webhook_token-%s' % project
+
+ def emitEvent(self, event, use_zuulweb=False, project=None,
+ wrong_token=False):
name, payload = event
- secret = 'fake_webhook_token-%s' % project
if use_zuulweb:
+ if not wrong_token:
+ secret = 'fake_webhook_token-%s' % project
+ else:
+ secret = ''
payload = json.dumps(payload).encode('utf-8')
signature, _ = pagureconnection._sign_request(payload, secret)
headers = {'x-pagure-signature': signature,
@@ -3182,7 +3208,7 @@ class ZuulWebFixture(fixtures.Fixture):
self.authenticators = zuul.lib.auth.AuthenticatorRegistry()
self.authenticators.configure(config)
if info is None:
- self.info = zuul.model.WebInfo()
+ self.info = zuul.model.WebInfo.fromConfig(config)
else:
self.info = info
self.zk_hosts = zk_hosts
@@ -3196,6 +3222,7 @@ class ZuulWebFixture(fixtures.Fixture):
info=self.info,
connections=self.connections,
zk_hosts=self.zk_hosts,
+ zk_timeout=10,
command_socket=os.path.join(self.test_root, 'web.socket'),
authenticators=self.authenticators)
self.web.start()
@@ -3439,22 +3466,22 @@ class SchedulerTestApp:
self.sched.registerConnections(connections)
- self.executor_client = zuul.executor.client.ExecutorClient(
+ executor_client = zuul.executor.client.ExecutorClient(
self.config, self.sched)
- self.merge_client = RecordingMergeClient(self.config, self.sched)
- self.nodepool = zuul.nodepool.Nodepool(self.sched)
- self.zk = zuul.zk.ZooKeeper(enable_cache=True)
- self.zk.connect(self.zk_config, timeout=60.0)
+ merge_client = RecordingMergeClient(self.config, self.sched)
+ nodepool = zuul.nodepool.Nodepool(self.sched)
+ zk = zuul.zk.ZooKeeper(enable_cache=True)
+ zk.connect(self.zk_config, timeout=30.0)
- self.sched.setExecutor(self.executor_client)
- self.sched.setMerger(self.merge_client)
- self.sched.setNodepool(self.nodepool)
- self.sched.setZooKeeper(self.zk)
+ self.sched.setExecutor(executor_client)
+ self.sched.setMerger(merge_client)
+ self.sched.setNodepool(nodepool)
+ self.sched.setZooKeeper(zk)
self.sched.start()
- self.executor_client.gearman.waitForServer()
+ executor_client.gearman.waitForServer()
self.sched.reconfigure(self.config)
- self.sched.resume()
+ self.sched.wakeUp()
def fullReconfigure(self):
try:
@@ -3475,6 +3502,52 @@ class SchedulerTestApp:
self.log.exception("Reconfiguration failed:")
+class SchedulerTestManager:
+ def __init__(self):
+ self.instances = []
+
+ def create(self, log: Logger, config: ConfigParser, zk_config: str,
+ connections: ConnectionRegistry) -> SchedulerTestApp:
+ app = SchedulerTestApp(log, config, zk_config, connections)
+ self.instances.append(app)
+ return app
+
+ def __len__(self) -> int:
+ return len(self.instances)
+
+ def __getitem__(self, item: int) -> SchedulerTestApp:
+ return self.instances[item]
+
+ def __setitem__(self, key: int, value: SchedulerTestApp):
+ raise Exception("Not implemented, use create method!")
+
+ def __delitem__(self, key, value):
+ raise Exception("Not implemented!")
+
+ def __iter__(self):
+ return iter(self.instances)
+
+ @property
+ def first(self) -> SchedulerTestApp:
+ if len(self.instances) == 0:
+ raise Exception("No scheduler!")
+ return self.instances[0]
+
+ def filter(self, matcher=None) -> Iterable[SchedulerTestApp]:
+ fcn = None # type: Optional[Callable[[int, SchedulerTestApp], bool]]
+ if type(matcher) == list:
+ def fcn(_: int, app: SchedulerTestApp) -> bool:
+ return app in matcher
+ elif type(matcher).__name__ == 'function':
+ fcn = matcher
+ return [e[1] for e in enumerate(self.instances)
+ if fcn is None or fcn(e[0], e[1])]
+
+ def execute(self, function: Callable[[Any], None], matcher=None) -> None:
+ for instance in self.filter(matcher):
+ function(instance)
+
+
class ZuulTestCase(BaseTestCase):
"""A test case with a functioning Zuul.
@@ -3648,7 +3721,7 @@ class ZuulTestCase(BaseTestCase):
gerritsource.GerritSource.replication_retry_interval = 0.5
gerritconnection.GerritEventConnector.delay = 0.0
- self.event_queues = []
+ self.additional_event_queues = []
self.poller_events = {}
self.configure_connections()
@@ -3663,27 +3736,27 @@ class ZuulTestCase(BaseTestCase):
self.history = self.executor_server.build_history
self.builds = self.executor_server.running_builds
- self.sched_app = SchedulerTestApp(self.log, self.config,
- self.zk_config,
- self.connections)
- self.sched = self.sched_app.sched
- self.event_queues = self.sched_app.event_queues + self.event_queues
+ self.scheds = SchedulerTestManager()
+ self.scheds.create(
+ self.log, self.config, self.zk_config, self.connections)
if hasattr(self, 'fake_github'):
- self.event_queues.append(
+ self.additional_event_queues.append(
self.fake_github.github_event_connector._event_forward_queue)
- self.executor_client = self.sched_app.executor_client
- self.merge_client = self.sched_app.merge_client
self.merge_server = None
- self.nodepool = self.sched_app.nodepool
- self.zk = self.sched_app.zk
# Cleanups are run in reverse order
self.addCleanup(self.assertCleanShutdown)
self.addCleanup(self.shutdown)
self.addCleanup(self.assertFinalState)
+ def __event_queues(self, matcher) -> List[Queue]:
+ sched_queues = map(lambda app: app.event_queues,
+ self.scheds.filter(matcher))
+ return [item for sublist in sched_queues for item in sublist] + \
+ self.additional_event_queues
+
def configure_connections(self, source_only=False):
# Set up gerrit related fakes
# Set a changes database so multiple FakeGerrit's can report back to
@@ -3706,7 +3779,7 @@ class ZuulTestCase(BaseTestCase):
if con.web_server:
self.addCleanup(con.web_server.stop)
- self.event_queues.append(con.event_queue)
+ self.additional_event_queues.append(con.event_queue)
setattr(self, 'fake_' + name, con)
return con
@@ -3745,7 +3818,7 @@ class ZuulTestCase(BaseTestCase):
changes_db=db,
upstream_root=self.upstream_root,
git_url_with_auth=self.git_url_with_auth)
- self.event_queues.append(con.event_queue)
+ self.additional_event_queues.append(con.event_queue)
setattr(self, 'fake_' + name, con)
registerGithubProjects(con)
return con
@@ -3762,7 +3835,7 @@ class ZuulTestCase(BaseTestCase):
self.rpcclient,
changes_db=db,
upstream_root=self.upstream_root)
- self.event_queues.append(con.event_queue)
+ self.additional_event_queues.append(con.event_queue)
setattr(self, 'fake_' + name, con)
return con
@@ -3778,7 +3851,7 @@ class ZuulTestCase(BaseTestCase):
self.rpcclient,
changes_db=db,
upstream_root=self.upstream_root)
- self.event_queues.append(con.event_queue)
+ self.additional_event_queues.append(con.event_queue)
setattr(self, 'fake_' + name, con)
return con
@@ -4062,7 +4135,7 @@ class ZuulTestCase(BaseTestCase):
self.assertNodepoolState()
self.assertNoGeneratedKeys()
ipm = zuul.manager.independent.IndependentPipelineManager
- for tenant in self.sched.abide.tenants.values():
+ for tenant in self.scheds.first.sched.abide.tenants.values():
for pipeline in tenant.layout.pipelines.values():
if isinstance(pipeline.manager, ipm):
self.assertEqual(len(pipeline.queues), 0)
@@ -4071,21 +4144,21 @@ class ZuulTestCase(BaseTestCase):
self.log.debug("Shutting down after tests")
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
- self.executor_client.stop()
- self.merge_client.stop()
+ self.scheds.execute(lambda app: app.sched.executor.stop())
+ self.scheds.execute(lambda app: app.sched.merger.stop())
if self.merge_server:
self.merge_server.stop()
self.merge_server.join()
self.executor_server.stop()
self.executor_server.join()
- self.sched.stop()
- self.sched.join()
+ self.scheds.execute(lambda app: app.sched.stop())
+ self.scheds.execute(lambda app: app.sched.join())
self.statsd.stop()
self.statsd.join()
self.rpcclient.shutdown()
self.gearman_server.shutdown()
self.fake_nodepool.stop()
- self.zk.disconnect()
+ self.scheds.execute(lambda app: app.sched.zk.disconnect())
self.printHistory()
# We whitelist watchdog threads as they have relatively long delays
# before noticing they should exit, but they should exit on their own.
@@ -4207,159 +4280,174 @@ class ZuulTestCase(BaseTestCase):
parameters = json.loads(job.arguments)
return parameters[name]
- def haveAllBuildsReported(self):
- # See if Zuul is waiting on a meta job to complete
- if self.executor_client.meta_jobs:
- return False
- # Find out if every build that the worker has completed has been
- # reported back to Zuul. If it hasn't then that means a Gearman
- # event is still in transit and the system is not stable.
- for build in self.history:
- zbuild = self.executor_client.builds.get(build.uuid)
- if not zbuild:
- # It has already been reported
- continue
- # It hasn't been reported yet.
- return False
- # Make sure that none of the worker connections are in GRAB_WAIT
- worker = self.executor_server.executor_gearworker.gearman
- for connection in worker.active_connections:
- if connection.state == 'GRAB_WAIT':
+ def __haveAllBuildsReported(self, matcher) -> bool:
+ for app in self.scheds.filter(matcher):
+ executor_client = app.sched.executor
+ # See if Zuul is waiting on a meta job to complete
+ if executor_client.meta_jobs:
return False
+ # Find out if every build that the worker has completed has been
+ # reported back to Zuul. If it hasn't then that means a Gearman
+ # event is still in transit and the system is not stable.
+ for build in self.history:
+ zbuild = executor_client.builds.get(build.uuid)
+ if not zbuild:
+ # It has already been reported
+ continue
+ # It hasn't been reported yet.
+ return False
+ # Make sure that none of the worker connections are in GRAB_WAIT
+ worker = self.executor_server.executor_gearworker.gearman
+ for connection in worker.active_connections:
+ if connection.state == 'GRAB_WAIT':
+ return False
return True
- def areAllBuildsWaiting(self):
- builds = self.executor_client.builds.values()
- seen_builds = set()
- for build in builds:
- seen_builds.add(build.uuid)
- client_job = None
- for conn in self.executor_client.gearman.active_connections:
- for j in conn.related_jobs.values():
- if j.unique == build.uuid:
- client_job = j
- break
- if not client_job:
- self.log.debug("%s is not known to the gearman client" %
- build)
- return False
- if not client_job.handle:
- self.log.debug("%s has no handle" % client_job)
- return False
- server_job = self.gearman_server.jobs.get(client_job.handle)
- if not server_job:
- self.log.debug("%s is not known to the gearman server" %
- client_job)
- return False
- if not hasattr(server_job, 'waiting'):
- self.log.debug("%s is being enqueued" % server_job)
- return False
- if server_job.waiting:
- continue
- if build.url is None:
- self.log.debug("%s has not reported start" % build)
- return False
- # using internal ServerJob which offers no Text interface
- worker_build = self.executor_server.job_builds.get(
- server_job.unique.decode('utf8'))
- if worker_build:
- if build.paused:
- continue
- if worker_build.isWaiting():
+ def __areAllBuildsWaiting(self, matcher) -> bool:
+ for app in self.scheds.filter(matcher):
+ executor_client = app.sched.executor
+ builds = executor_client.builds.values()
+ seen_builds = set()
+ for build in builds:
+ seen_builds.add(build.uuid)
+ client_job = None
+ for conn in executor_client.gearman.active_connections:
+ for j in conn.related_jobs.values():
+ if j.unique == build.uuid:
+ client_job = j
+ break
+ if not client_job:
+ self.log.debug("%s is not known to the gearman client" %
+ build)
+ return False
+ if not client_job.handle:
+ self.log.debug("%s has no handle" % client_job)
+ return False
+ server_job = self.gearman_server.jobs.get(client_job.handle)
+ if not server_job:
+ self.log.debug("%s is not known to the gearman server" %
+ client_job)
+ return False
+ if not hasattr(server_job, 'waiting'):
+ self.log.debug("%s is being enqueued" % server_job)
+ return False
+ if server_job.waiting:
continue
- self.log.debug("%s is running" % worker_build)
- return False
- else:
- self.log.debug("%s is unassigned" % server_job)
- return False
- for (build_uuid, job_worker) in \
- self.executor_server.job_workers.items():
- if build_uuid not in seen_builds:
- self.log.debug("%s is not finalized" % build_uuid)
- return False
+ if build.url is None:
+ self.log.debug("%s has not reported start" % build)
+ return False
+ # using internal ServerJob which offers no Text interface
+ worker_build = self.executor_server.job_builds.get(
+ server_job.unique.decode('utf8'))
+ if worker_build:
+ if build.paused:
+ continue
+ if worker_build.isWaiting():
+ continue
+ self.log.debug("%s is running" % worker_build)
+ return False
+ else:
+ self.log.debug("%s is unassigned" % server_job)
+ return False
+ for (build_uuid, job_worker) in \
+ self.executor_server.job_workers.items():
+ if build_uuid not in seen_builds:
+ self.log.debug("%s is not finalized" % build_uuid)
+ return False
return True
- def areAllNodeRequestsComplete(self):
+ def __areAllNodeRequestsComplete(self, matcher) -> bool:
if self.fake_nodepool.paused:
return True
- if self.sched.nodepool.requests:
- return False
+ for app in self.scheds.filter(matcher):
+ if app.sched.nodepool.requests:
+ return False
return True
- def areAllMergeJobsWaiting(self):
- for client_job in list(self.merge_client.jobs):
- if not client_job.handle:
- self.log.debug("%s has no handle" % client_job)
- return False
- server_job = self.gearman_server.jobs.get(client_job.handle)
- if not server_job:
- self.log.debug("%s is not known to the gearman server" %
- client_job)
- return False
- if not hasattr(server_job, 'waiting'):
- self.log.debug("%s is being enqueued" % server_job)
+ def __areAllMergeJobsWaiting(self, matcher) -> bool:
+ for app in self.scheds.filter(matcher):
+ merge_client = app.sched.merger
+ for client_job in list(merge_client.jobs):
+ if not client_job.handle:
+ self.log.debug("%s has no handle" % client_job)
+ return False
+ server_job = self.gearman_server.jobs.get(client_job.handle)
+ if not server_job:
+ self.log.debug("%s is not known to the gearman server" %
+ client_job)
+ return False
+ if not hasattr(server_job, 'waiting'):
+ self.log.debug("%s is being enqueued" % server_job)
+ return False
+ if server_job.waiting:
+ self.log.debug("%s is waiting" % server_job)
+ continue
+ self.log.debug("%s is not waiting" % server_job)
return False
- if server_job.waiting:
- self.log.debug("%s is waiting" % server_job)
- continue
- self.log.debug("%s is not waiting" % server_job)
- return False
return True
- def eventQueuesEmpty(self):
- for event_queue in self.event_queues:
+ def __eventQueuesEmpty(self, matcher)\
+ -> Generator[bool, None, None]:
+ for event_queue in self.__event_queues(matcher):
yield event_queue.empty()
- def eventQueuesJoin(self):
- for event_queue in self.event_queues:
+ def __eventQueuesJoin(self, matcher) -> None:
+ for app in self.scheds.filter(matcher):
+ for event_queue in app.event_queues:
+ event_queue.join()
+ for event_queue in self.additional_event_queues:
event_queue.join()
- def waitUntilSettled(self, msg=""):
+ def waitUntilSettled(self, msg="", matcher=None) -> None:
self.log.debug("Waiting until settled... (%s)", msg)
start = time.time()
while True:
if time.time() - start > self.wait_timeout:
self.log.error("Timeout waiting for Zuul to settle")
self.log.error("Queue status:")
- for event_queue in self.event_queues:
+ for event_queue in self.__event_queues(matcher):
self.log.error(" %s: %s" %
(event_queue, event_queue.empty()))
self.log.error("All builds waiting: %s" %
- (self.areAllBuildsWaiting(),))
+ (self.__areAllBuildsWaiting(matcher),))
self.log.error("All builds reported: %s" %
- (self.haveAllBuildsReported(),))
+ (self.__haveAllBuildsReported(matcher),))
self.log.error("All requests completed: %s" %
- (self.areAllNodeRequestsComplete(),))
- self.log.error("Merge client jobs: %s" %
- (self.merge_client.jobs,))
+ (self.__areAllNodeRequestsComplete(matcher),))
+ for app in self.scheds.filter(matcher):
+ self.log.error("[Sched: %s] Merge client jobs: %s" %
+ (app.sched, app.sched.merger.jobs,))
raise Exception("Timeout waiting for Zuul to settle")
# Make sure no new events show up while we're checking
self.executor_server.lock.acquire()
# have all build states propogated to zuul?
- if self.haveAllBuildsReported():
+ if self.__haveAllBuildsReported(matcher):
# Join ensures that the queue is empty _and_ events have been
# processed
- self.eventQueuesJoin()
- self.sched.run_handler_lock.acquire()
- if (self.areAllMergeJobsWaiting() and
- self.haveAllBuildsReported() and
- self.areAllBuildsWaiting() and
- self.areAllNodeRequestsComplete() and
- all(self.eventQueuesEmpty())):
+ self.__eventQueuesJoin(matcher)
+ self.scheds.execute(
+ lambda app: app.sched.run_handler_lock.acquire())
+ if (self.__areAllMergeJobsWaiting(matcher) and
+ self.__haveAllBuildsReported(matcher) and
+ self.__areAllBuildsWaiting(matcher) and
+ self.__areAllNodeRequestsComplete(matcher) and
+ all(self.__eventQueuesEmpty(matcher))):
# The queue empty check is placed at the end to
# ensure that if a component adds an event between
# when locked the run handler and checked that the
# components were stable, we don't erroneously
# report that we are settled.
- self.sched.run_handler_lock.release()
+ self.scheds.execute(
+ lambda app: app.sched.run_handler_lock.release())
self.executor_server.lock.release()
self.log.debug("...settled. (%s)", msg)
self.logState()
return
- self.sched.run_handler_lock.release()
+ self.scheds.execute(
+ lambda app: app.sched.run_handler_lock.release())
self.executor_server.lock.release()
- self.sched.wake_event.wait(0.1)
+ self.scheds.execute(lambda app: app.sched.wake_event.wait(0.1))
def waitForPoll(self, poller, timeout=30):
self.log.debug("Wait for poll on %s", poller)
@@ -4378,7 +4466,7 @@ class ZuulTestCase(BaseTestCase):
self.log.info("Completed build: %s" % build)
for build in self.builds:
self.log.info("Running build: %s" % build)
- for tenant in self.sched.abide.tenants.values():
+ for tenant in self.scheds.first.sched.abide.tenants.values():
for pipeline in tenant.layout.pipelines.values():
for pipeline_queue in pipeline.queues:
if len(pipeline_queue.queue) != 0:
@@ -4422,7 +4510,7 @@ class ZuulTestCase(BaseTestCase):
def assertEmptyQueues(self):
# Make sure there are no orphaned jobs
- for tenant in self.sched.abide.tenants.values():
+ for tenant in self.scheds.first.sched.abide.tenants.values():
for pipeline in tenant.layout.pipelines.values():
for pipeline_queue in pipeline.queues:
if len(pipeline_queue.queue) != 0:
diff --git a/tests/fakegithub.py b/tests/fakegithub.py
index bbcbb6aa2..558d23471 100644
--- a/tests/fakegithub.py
+++ b/tests/fakegithub.py
@@ -555,8 +555,8 @@ class FakeResponse(object):
class FakeGithubSession(object):
- def __init__(self, data):
- self._data = data
+ def __init__(self, client):
+ self.client = client
self.headers = CaseInsensitiveDict()
self._base_url = None
self.schema = graphene.Schema(query=FakeGithubQuery)
@@ -584,7 +584,7 @@ class FakeGithubSession(object):
query = json.get('query')
variables = json.get('variables')
result = self.schema.execute(
- query, variables=variables, context=self._data)
+ query, variables=variables, context=self.client._data)
return FakeResponse({'data': result.data}, 200)
return FakeResponse(None, 404)
@@ -593,8 +593,7 @@ class FakeGithubSession(object):
org, project, request = request.split('/', 2)
project_name = '{}/{}'.format(org, project)
- client = FakeGithubClient(self._data)
- repo = client.repo_from_project(project_name)
+ repo = self.client.repo_from_project(project_name)
return repo.get_url(request, params=params)
@@ -618,7 +617,7 @@ class FakeGithubClient(object):
def __init__(self, data, inst_id=None):
self._data = data
self._inst_id = inst_id
- self.session = FakeGithubSession(data)
+ self.session = FakeGithubSession(self)
def user(self, login):
return FakeUser(login)
diff --git a/tests/fixtures/config/broken-multi-tenant/git/common-config/playbooks/job.yaml b/tests/fixtures/config/broken-multi-tenant/git/common-config/playbooks/job.yaml
new file mode 100644
index 000000000..f679dceae
--- /dev/null
+++ b/tests/fixtures/config/broken-multi-tenant/git/common-config/playbooks/job.yaml
@@ -0,0 +1,2 @@
+- hosts: all
+ tasks: []
diff --git a/tests/fixtures/config/broken-multi-tenant/git/common-config/zuul.yaml b/tests/fixtures/config/broken-multi-tenant/git/common-config/zuul.yaml
new file mode 100644
index 000000000..406a64248
--- /dev/null
+++ b/tests/fixtures/config/broken-multi-tenant/git/common-config/zuul.yaml
@@ -0,0 +1,23 @@
+- pipeline:
+ name: check
+ manager: independent
+ trigger:
+ gerrit:
+ - event: patchset-created
+ success:
+ gerrit:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+
+- job:
+ name: base
+ parent: null
+ run: playbooks/job.yaml
+
+- project:
+ name: ^.*
+ check:
+ jobs:
+ - base
diff --git a/tests/fixtures/config/broken-multi-tenant/git/org_project1/README b/tests/fixtures/config/broken-multi-tenant/git/org_project1/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/broken-multi-tenant/git/org_project1/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/broken-multi-tenant/git/org_project2/README b/tests/fixtures/config/broken-multi-tenant/git/org_project2/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/broken-multi-tenant/git/org_project2/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/broken-multi-tenant/git/org_project2/zuul.yaml b/tests/fixtures/config/broken-multi-tenant/git/org_project2/zuul.yaml
new file mode 100644
index 000000000..40f68b640
--- /dev/null
+++ b/tests/fixtures/config/broken-multi-tenant/git/org_project2/zuul.yaml
@@ -0,0 +1,3 @@
+- job:
+ name: child-job
+ parent: parent-job
diff --git a/tests/fixtures/config/broken-multi-tenant/git/org_project3/README b/tests/fixtures/config/broken-multi-tenant/git/org_project3/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/broken-multi-tenant/git/org_project3/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/broken-multi-tenant/git/org_project3/zuul.yaml b/tests/fixtures/config/broken-multi-tenant/git/org_project3/zuul.yaml
new file mode 100644
index 000000000..beef1faa0
--- /dev/null
+++ b/tests/fixtures/config/broken-multi-tenant/git/org_project3/zuul.yaml
@@ -0,0 +1,2 @@
+- job:
+ name: parent-job
diff --git a/tests/fixtures/config/broken-multi-tenant/main.yaml b/tests/fixtures/config/broken-multi-tenant/main.yaml
new file mode 100644
index 000000000..053056e9f
--- /dev/null
+++ b/tests/fixtures/config/broken-multi-tenant/main.yaml
@@ -0,0 +1,19 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project2
+ - org/project3
+
+- tenant:
+ name: tenant-two
+ source:
+ gerrit:
+ config-projects:
+ - common-config
+ untrusted-projects:
+ - org/project1
+ - org/project2
diff --git a/tests/fixtures/config/gerrit-file-comments/git/org_project/playbooks/file-comments.yaml b/tests/fixtures/config/gerrit-file-comments/git/org_project/playbooks/file-comments.yaml
index b281af233..97400bab3 100644
--- a/tests/fixtures/config/gerrit-file-comments/git/org_project/playbooks/file-comments.yaml
+++ b/tests/fixtures/config/gerrit-file-comments/git/org_project/playbooks/file-comments.yaml
@@ -9,6 +9,9 @@
message: line too long
- line: 82
message: line too short
+ - line: 2
+ message: levels are ignored by gerrit
+ level: warning
otherfile.txt:
- line: 21
message: |
diff --git a/tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments-error.yaml b/tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments-error.yaml
index a87c1348b..0556ae40e 100644
--- a/tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments-error.yaml
+++ b/tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments-error.yaml
@@ -19,6 +19,9 @@
- line: 7 # No message provided
- line: 9999
message: Line is not part of the file
+ - line: 3
+ message: Invalid level will fall back to warning
+ level: invalid-level
missingfile.txt:
- line: 1
message: "Missing file"
diff --git a/tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments.yaml b/tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments.yaml
index 5838954e9..62f70b1fa 100644
--- a/tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments.yaml
+++ b/tests/fixtures/config/github-file-comments/git/org_project/playbooks/file-comments.yaml
@@ -7,8 +7,12 @@
README:
- line: 1
message: "Simple line annotation"
+ - line: 2
+ message: "Line annotation with level"
+ level: warning
- line: 6
message: "simple range annotation"
+ level: info
range:
start_line: 4
start_character: 0
@@ -16,6 +20,7 @@
end_character: 10
- line: 7
message: "Columns must be part of the same line"
+ level: error
range:
start_line: 7
start_character: 13
diff --git a/tests/fixtures/config/zuul-environment-filter/git/common-config/playbooks/zuul-environment-filter.yaml b/tests/fixtures/config/zuul-environment-filter/git/common-config/playbooks/zuul-environment-filter.yaml
new file mode 100644
index 000000000..04ffb7815
--- /dev/null
+++ b/tests/fixtures/config/zuul-environment-filter/git/common-config/playbooks/zuul-environment-filter.yaml
@@ -0,0 +1,10 @@
+- hosts: all
+ tasks:
+ - debug:
+ var: lookup('env', 'ZUUL_TEST_VAR')
+ - assert:
+ that: lookup('env', 'ZUUL_TEST_VAR') == ""
+ - debug:
+ var: lookup('env', 'TEST_VAR')
+ - assert:
+ that: lookup('env', 'TEST_VAR') == "not-empty"
diff --git a/tests/fixtures/config/zuul-environment-filter/git/common-config/zuul.yaml b/tests/fixtures/config/zuul-environment-filter/git/common-config/zuul.yaml
new file mode 100644
index 000000000..9097c596e
--- /dev/null
+++ b/tests/fixtures/config/zuul-environment-filter/git/common-config/zuul.yaml
@@ -0,0 +1,21 @@
+- pipeline:
+ name: promote
+ manager: supercedent
+ post-review: true
+ trigger:
+ gerrit:
+ - event: change-merged
+
+- job:
+ name: zuul-environment-filter
+ parent: null
+ run: playbooks/zuul-environment-filter.yaml
+ nodeset:
+ nodes:
+ - name: ubuntu-xenial
+ label: ubuntu-xenial
+
+- project:
+ promote:
+ jobs:
+ - zuul-environment-filter
diff --git a/tests/fixtures/config/zuul-environment-filter/main.yaml b/tests/fixtures/config/zuul-environment-filter/main.yaml
new file mode 100644
index 000000000..9d01f542f
--- /dev/null
+++ b/tests/fixtures/config/zuul-environment-filter/main.yaml
@@ -0,0 +1,6 @@
+- tenant:
+ name: tenant-one
+ source:
+ gerrit:
+ config-projects:
+ - common-config
diff --git a/tests/fixtures/config/zuultrigger/project-change-merged/git/common-config/zuul.yaml b/tests/fixtures/config/zuultrigger/project-change-merged/git/common-config/zuul.yaml
index 045e0a988..77a96c25b 100644
--- a/tests/fixtures/config/zuultrigger/project-change-merged/git/common-config/zuul.yaml
+++ b/tests/fixtures/config/zuultrigger/project-change-merged/git/common-config/zuul.yaml
@@ -36,6 +36,10 @@
name: merge-check
manager: independent
ignore-dependencies: true
+ reject:
+ gerrit:
+ approval:
+ - Verified: -1
trigger:
zuul:
- event: project-change-merged
diff --git a/tests/fixtures/layouts/requirements-github.yaml b/tests/fixtures/layouts/requirements-github.yaml
index f5fa0f5de..9e376b8b6 100644
--- a/tests/fixtures/layouts/requirements-github.yaml
+++ b/tests/fixtures/layouts/requirements-github.yaml
@@ -89,6 +89,25 @@
check: success
- pipeline:
+ name: require_check_run
+ manager: independent
+ require:
+ github:
+ status:
+ # Github does not differentiate between status and check run
+ # in case of branch protection and required status checks.
+ - check-run:tenant-one/check:success
+ trigger:
+ github:
+ - event: pull_request
+ action: comment
+ comment: trigger me
+ success:
+ github:
+ check: success
+
+
+- pipeline:
name: trigger
manager: independent
trigger:
@@ -367,6 +386,10 @@
name: project15-check-run
run: playbooks/project15-check-run.yaml
+- job:
+ name: project16-require-check-run
+ run: playbooks/project16-require-check-run.yaml
+
- project:
name: org/project1
pipeline:
@@ -465,3 +488,9 @@
trigger_check_run:
jobs:
- project15-check-run
+
+- project:
+ name: org/project16
+ require_check_run:
+ jobs:
+ - project16-require-check-run
diff --git a/tests/fixtures/layouts/requirements-pagure.yaml b/tests/fixtures/layouts/requirements-pagure.yaml
index 998fa7fdb..4a95d8f08 100644
--- a/tests/fixtures/layouts/requirements-pagure.yaml
+++ b/tests/fixtures/layouts/requirements-pagure.yaml
@@ -67,6 +67,18 @@
status: 'success'
- pipeline:
+ name: require-flag
+ manager: independent
+ require:
+ pagure:
+ status: success
+ trigger:
+ pagure:
+ - event: pg_pull_request
+ action: status
+ status: success
+
+- pipeline:
name: require-trigger-pg-closed-merged
precedence: high
manager: independent
@@ -125,4 +137,10 @@
name: org/project6
require-trigger-pg-closed-merged:
jobs:
- - project-test \ No newline at end of file
+ - project-test
+
+- project:
+ name: org/project7
+ require-flag:
+ jobs:
+ - project-test
diff --git a/tests/fixtures/layouts/serial.yaml b/tests/fixtures/layouts/serial.yaml
new file mode 100644
index 000000000..5a744ce82
--- /dev/null
+++ b/tests/fixtures/layouts/serial.yaml
@@ -0,0 +1,48 @@
+- pipeline:
+ name: deploy
+ manager: serial
+ trigger:
+ gerrit:
+ - event: change-merged
+ post-review: True
+ success:
+ gerrit: {}
+ failure:
+ gerrit: {}
+
+- job:
+ name: base
+ parent: null
+ nodeset:
+ nodes:
+ - label: ubuntu-xenial
+ name: controller
+
+- job:
+ name: job1
+ run: playbooks/job1.yaml
+
+- job:
+ name: job2
+ run: playbooks/job2.yaml
+
+- project:
+ name: org/project
+ deploy:
+ jobs:
+ - job1
+ - job2
+
+- project:
+ name: org/project1
+ deploy:
+ queue: shared
+ jobs:
+ - job1
+
+- project:
+ name: org/project2
+ deploy:
+ queue: shared
+ jobs:
+ - job1
diff --git a/tests/fixtures/zuul-admin-web-oidc.conf b/tests/fixtures/zuul-admin-web-oidc.conf
new file mode 100644
index 000000000..33e5136b5
--- /dev/null
+++ b/tests/fixtures/zuul-admin-web-oidc.conf
@@ -0,0 +1,45 @@
+[gearman]
+server=127.0.0.1
+
+[scheduler]
+tenant_config=main.yaml
+relative_priority=true
+
+[merger]
+git_dir=/tmp/zuul-test/merger-git
+git_user_email=zuul@example.com
+git_user_name=zuul
+
+[executor]
+git_dir=/tmp/zuul-test/executor-git
+
+[connection gerrit]
+driver=gerrit
+server=review.example.com
+user=jenkins
+sshkey=fake_id_rsa_path
+
+[web]
+static_cache_expiry=1200
+
+[auth zuul_operator]
+driver=HS256
+allow_authz_override=true
+realm=zuul.example.com
+client_id=zuul.example.com
+issuer_id=zuul_operator
+secret=NoDanaOnlyZuul
+
+[auth myOIDC1]
+driver=OpenIDConnect
+realm=myOIDC1
+default=true
+client_id=zuul
+issuer_id=http://oidc1
+
+[auth myOIDC2]
+driver=OpenIDConnect
+realm=myOIDC2
+client_id=zuul
+issuer_id=http://oidc2
+scope=openid profile email special-scope
diff --git a/tests/fixtures/zuul-pagure-driver-whitelist.conf b/tests/fixtures/zuul-pagure-driver-whitelist.conf
new file mode 100644
index 000000000..1b5d55b31
--- /dev/null
+++ b/tests/fixtures/zuul-pagure-driver-whitelist.conf
@@ -0,0 +1,19 @@
+[gearman]
+server=127.0.0.1
+
+[web]
+status_url=http://zuul.example.com/status/#{change.number},{change.patchset}
+
+[merger]
+git_dir=/tmp/zuul-test/git
+git_user_email=zuul@example.com
+git_user_name=zuul
+
+[executor]
+git_dir=/tmp/zuul-test/executor-git
+
+[connection pagure]
+driver=pagure
+server=pagure
+api_token=0000000000000000000000000000000000000000
+source_whitelist=::ffff:127.0.0.1
diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py
index 16c32a8ef..cf156e156 100644
--- a/tests/unit/test_configloader.py
+++ b/tests/unit/test_configloader.py
@@ -38,7 +38,7 @@ class TestTenantSimple(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/simple.yaml'
def test_tenant_simple(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
@@ -70,7 +70,7 @@ class TestTenantSimple(TenantParserTestCase):
project2_config[1].pipelines['check'].job_list.jobs)
def test_variant_description(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
job = tenant.layout.jobs.get("project2-job")
self.assertEqual(job[0].variant_description, "")
self.assertEqual(job[1].variant_description, "stable")
@@ -80,7 +80,7 @@ class TestTenantOverride(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/override.yaml'
def test_tenant_override(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2', 'org/project4'],
@@ -116,7 +116,7 @@ class TestTenantGroups(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups.yaml'
def test_tenant_groups(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
@@ -153,7 +153,7 @@ class TestTenantGroups2(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups2.yaml'
def test_tenant_groups2(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2', 'org/project3'],
@@ -190,7 +190,7 @@ class TestTenantGroups3(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups3.yaml'
def test_tenant_groups3(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(False, tenant.exclude_unprotected_branches)
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
@@ -226,7 +226,7 @@ class TestTenantGroups4(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups4.yaml'
def test_tenant_groups(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1', 'org/project2'],
@@ -255,7 +255,7 @@ class TestTenantGroups4(TenantParserTestCase):
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
new_layout = tenant.layout
self.assertEqual(old_layout, new_layout)
@@ -265,7 +265,7 @@ class TestTenantGroups5(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/groups5.yaml'
def test_tenant_single_projet_exclude(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(['common-config'],
[x.name for x in tenant.config_projects])
self.assertEqual(['org/project1'],
@@ -296,7 +296,7 @@ class TestTenantUnprotectedBranches(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/unprotected-branches.yaml'
def test_tenant_unprotected_branches(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(True, tenant.exclude_unprotected_branches)
self.assertEqual(['common-config'],
@@ -325,7 +325,7 @@ class TestTenantExcludeAll(TenantParserTestCase):
"""
# The config in org/project5 depends on config in org/project1 so
# validate that there are no config errors in that tenant.
- tenant_two = self.sched.abide.tenants.get('tenant-two')
+ tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
self.assertEquals(
len(tenant_two.layout.loading_errors), 0,
"No error should have been accumulated")
@@ -335,7 +335,7 @@ class TestTenantConfigBranches(ZuulTestCase):
tenant_config_file = 'config/tenant-parser/simple.yaml'
def _validate_job(self, job, branch):
- tenant_one = self.sched.abide.tenants.get('tenant-one')
+ tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = tenant_one.layout.getJobs(job)
self.assertEquals(len(jobs), 1)
self.assertIn(jobs[0].source_context.branch, branch)
@@ -353,14 +353,14 @@ class TestTenantConfigBranches(ZuulTestCase):
self.create_branch('common-config', 'stable')
self.create_branch('common-config', 'feat_x')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Job must be defined in master
self._validate_job(common_job, 'master')
# Reconfigure with load-branch stable for common-config
self.newTenantConfig('config/tenant-parser/branch.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Now job must be defined on stable branch
self._validate_job(common_job, 'stable')
@@ -370,7 +370,7 @@ class TestSplitConfig(ZuulTestCase):
tenant_config_file = 'config/split-config/main.yaml'
def test_split_config(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertIn('project-test1', tenant.layout.jobs)
self.assertIn('project-test2', tenant.layout.jobs)
test1 = tenant.layout.getJob('project-test1')
@@ -445,7 +445,7 @@ class TestConfigConflict(ZuulTestCase):
tenant_config_file = 'config/conflict-config/main.yaml'
def test_conflict_config(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = sorted(tenant.layout.jobs.keys())
self.assertEqual(
['base', 'noop', 'trusted-zuul.yaml-job',
@@ -457,9 +457,11 @@ class TestAuthorizationRuleParser(ZuulTestCase):
tenant_config_file = 'config/tenant-parser/authorizations.yaml'
def test_rules_are_loaded(self):
- rules = self.sched.abide.admin_rules
- self.assertTrue('auth-rule-one' in rules, self.sched.abide)
- self.assertTrue('auth-rule-two' in rules, self.sched.abide)
+ rules = self.scheds.first.sched.abide.admin_rules
+ self.assertTrue('auth-rule-one' in rules,
+ self.scheds.first.sched.abide)
+ self.assertTrue('auth-rule-two' in rules,
+ self.scheds.first.sched.abide)
claims_1 = {'sub': 'venkman'}
claims_2 = {'sub': 'gozer',
'iss': 'another_dimension'}
@@ -591,39 +593,32 @@ class TestAuthorizationRuleParserWithTemplating(ZuulTestCase):
tenant_config_file = 'config/tenant-parser/authorizations-templating.yaml'
def test_rules_are_loaded(self):
- rules = self.sched.abide.admin_rules
- self.assertTrue('tenant-admin' in rules, self.sched.abide)
- self.assertTrue('tenant-admin-complex' in rules, self.sched.abide)
+ rules = self.scheds.first.sched.abide.admin_rules
+ self.assertTrue('tenant-admin' in rules, self.scheds.first.sched.abide)
+ self.assertTrue('tenant-admin-complex' in rules,
+ self.scheds.first.sched.abide)
def test_tenant_substitution(self):
claims_1 = {'group': 'tenant-one-admin'}
claims_2 = {'group': 'tenant-two-admin'}
- rules = self.sched.abide.admin_rules
- tenant_one = self.sched.abide.tenants.get('tenant-one')
- tenant_two = self.sched.abide.tenants.get('tenant-two')
- self.assertTrue(rules['tenant-admin'](claims_1,
- tenant_one))
- self.assertTrue(rules['tenant-admin'](claims_2,
- tenant_two))
- self.assertTrue(not rules['tenant-admin'](claims_1,
- tenant_two))
- self.assertTrue(not rules['tenant-admin'](claims_2,
- tenant_one))
+ rules = self.scheds.first.sched.abide.admin_rules
+ tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
+ self.assertTrue(rules['tenant-admin'](claims_1, tenant_one))
+ self.assertTrue(rules['tenant-admin'](claims_2, tenant_two))
+ self.assertTrue(not rules['tenant-admin'](claims_1, tenant_two))
+ self.assertTrue(not rules['tenant-admin'](claims_2, tenant_one))
def test_tenant_substitution_in_list(self):
claims_1 = {'group': ['tenant-one-admin', 'some-other-tenant']}
claims_2 = {'group': ['tenant-two-admin', 'some-other-tenant']}
- rules = self.sched.abide.admin_rules
- tenant_one = self.sched.abide.tenants.get('tenant-one')
- tenant_two = self.sched.abide.tenants.get('tenant-two')
- self.assertTrue(rules['tenant-admin'](claims_1,
- tenant_one))
- self.assertTrue(rules['tenant-admin'](claims_2,
- tenant_two))
- self.assertTrue(not rules['tenant-admin'](claims_1,
- tenant_two))
- self.assertTrue(not rules['tenant-admin'](claims_2,
- tenant_one))
+ rules = self.scheds.first.sched.abide.admin_rules
+ tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
+ self.assertTrue(rules['tenant-admin'](claims_1, tenant_one))
+ self.assertTrue(rules['tenant-admin'](claims_2, tenant_two))
+ self.assertTrue(not rules['tenant-admin'](claims_1, tenant_two))
+ self.assertTrue(not rules['tenant-admin'](claims_2, tenant_one))
def test_tenant_substitution_in_dict(self):
claims_2 = {
@@ -633,21 +628,19 @@ class TestAuthorizationRuleParserWithTemplating(ZuulTestCase):
}
}
}
- rules = self.sched.abide.admin_rules
- tenant_one = self.sched.abide.tenants.get('tenant-one')
- tenant_two = self.sched.abide.tenants.get('tenant-two')
- self.assertTrue(
- not rules['tenant-admin-complex'](claims_2,
- tenant_one))
- self.assertTrue(
- rules['tenant-admin-complex'](claims_2, tenant_two))
+ rules = self.scheds.first.sched.abide.admin_rules
+ tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
+ self.assertTrue(not rules['tenant-admin-complex'](claims_2,
+ tenant_one))
+ self.assertTrue(rules['tenant-admin-complex'](claims_2, tenant_two))
class TestTenantExtra(TenantParserTestCase):
tenant_config_file = 'config/tenant-parser/extra.yaml'
def test_tenant_extra(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertTrue('project2-extra-file' in tenant.layout.jobs)
self.assertTrue('project2-extra-dir' in tenant.layout.jobs)
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index 98048eacf..d9b84fa6d 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -122,7 +122,7 @@ class TestSQLConnection(ZuulDBTestCase):
def check_results(connection_name):
# Grab the sa tables
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
reporter = _get_reporter_from_connection_name(
tenant.layout.pipelines['check'].success_actions,
connection_name
@@ -241,7 +241,7 @@ class TestSQLConnection(ZuulDBTestCase):
def check_results(connection_name_1, connection_name_2):
# Grab the sa tables for resultsdb
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
reporter1 = _get_reporter_from_connection_name(
tenant.layout.pipelines['check'].success_actions,
connection_name_1
@@ -308,7 +308,7 @@ class TestConnectionsBadSQL(ZuulDBTestCase):
"Test the SQL reporter fails gracefully when unable to connect"
self.config.set('zuul', 'layout_config',
'tests/fixtures/layout-sql-reporter.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Trigger a reporter. If no errors are raised, the reporter has been
# disabled correctly
diff --git a/tests/unit/test_cross_crd.py b/tests/unit/test_cross_crd.py
index e82e43688..f418d6f93 100644
--- a/tests/unit/test_cross_crd.py
+++ b/tests/unit/test_cross_crd.py
@@ -274,7 +274,7 @@ class TestGerritToGithubCRD(ZuulTestCase):
self.assertEqual(changes, '1,%s 1,1' %
(B.head_sha,))
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
def test_crd_check_duplicate(self):
@@ -288,7 +288,7 @@ class TestGerritToGithubCRD(ZuulTestCase):
# A Depends-On: B
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two dependent changes...
@@ -346,11 +346,11 @@ class TestGerritToGithubCRD(ZuulTestCase):
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Make sure the items still share a change queue, and the
# first one is not live.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 1)
queue = tenant.layout.pipelines['check'].queues[0]
first_item = queue.queue[0]
@@ -438,8 +438,12 @@ class TestGerritToGithubCRD(ZuulTestCase):
A.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
A.subject, B.url)
- # Make sure zuul has seen an event on B.
+ # Make sure zuul has seen an event on B. This is necessary
+ # in order to populate our fake github project db.
self.fake_github.emitEvent(B.getPullRequestEditedEvent())
+ # Note we wait until settled here as the event processing for
+ # the next event may not have the updated db yet otherwise.
+ self.waitUntilSettled()
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
@@ -725,7 +729,7 @@ class TestGithubToGerritCRD(ZuulTestCase):
self.assertEqual(changes, '1,1 1,%s' %
(A.head_sha,))
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
def test_crd_check_duplicate(self):
@@ -738,7 +742,7 @@ class TestGithubToGerritCRD(ZuulTestCase):
# A Depends-On: B
A.editBody('Depends-On: %s\n' % (B.data['url'],))
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two dependent changes...
@@ -795,11 +799,11 @@ class TestGithubToGerritCRD(ZuulTestCase):
self.fake_github.emitEvent(A.getPullRequestEditedEvent())
self.waitUntilSettled()
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Make sure the items still share a change queue, and the
# first one is not live.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 1)
queue = tenant.layout.pipelines['check'].queues[0]
first_item = queue.queue[0]
@@ -900,6 +904,9 @@ class TestGithubToGerritCRD(ZuulTestCase):
# Make sure zuul has seen an event on B.
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ # Note we wait until settled here as the event processing for
+ # the next event may not have the updated db yet otherwise.
+ self.waitUntilSettled()
self.fake_github.emitEvent(A.getPullRequestEditedEvent())
self.waitUntilSettled()
diff --git a/tests/unit/test_executor.py b/tests/unit/test_executor.py
index b85780f1c..c0fbc5546 100644
--- a/tests/unit/test_executor.py
+++ b/tests/unit/test_executor.py
@@ -281,7 +281,7 @@ class TestExecutorRepos(ZuulTestCase):
# Start timer trigger - also org/project
self.commitConfigUpdate('common-config',
'layouts/repo-checkout-timer-override.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# The pipeline triggers every second, so we should have seen
# several by now.
@@ -292,7 +292,7 @@ class TestExecutorRepos(ZuulTestCase):
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config',
'layouts/repo-checkout-no-timer-override.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
@@ -320,7 +320,7 @@ class TestExecutorRepos(ZuulTestCase):
# Start timer trigger - also org/project
self.commitConfigUpdate('common-config',
'layouts/repo-checkout-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
p1 = 'review.example.com/org/project1'
projects = [p1]
@@ -339,7 +339,7 @@ class TestExecutorRepos(ZuulTestCase):
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config',
'layouts/repo-checkout-no-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
@@ -816,6 +816,21 @@ class TestExecutorFacts(AnsibleZuulTestCase):
self.assertEqual(18, len(date_time))
+class TestExecutorEnvironment(AnsibleZuulTestCase):
+ tenant_config_file = 'config/zuul-environment-filter/main.yaml'
+
+ @mock.patch.dict('os.environ', {'ZUUL_TEST_VAR': 'some-value',
+ 'TEST_VAR': 'not-empty'})
+ def test_zuul_environment_filter(self):
+ A = self.fake_gerrit.addFakeChange('common-config', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ self.assertEqual(
+ self.getJobFromHistory('zuul-environment-filter').result,
+ 'SUCCESS')
+
+
class TestExecutorStart(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
@@ -827,3 +842,27 @@ class TestExecutorStart(ZuulTestCase):
def test_executor_start(self):
self.assertFalse(os.path.exists(self.junk_dir))
+
+
+class TestExecutorExtraPackages(AnsibleZuulTestCase):
+ tenant_config_file = 'config/single-tenant/main.yaml'
+
+ test_package = 'pywinrm'
+
+ def setUp(self):
+ super(TestExecutorExtraPackages, self).setUp()
+ import subprocess
+ ansible_manager = self.executor_server.ansible_manager
+ for version in ansible_manager._supported_versions:
+ command = [ansible_manager.getAnsibleCommand(version, 'pip'),
+ 'uninstall', '-y', self.test_package]
+ subprocess.run(command)
+
+ @mock.patch('zuul.lib.ansible.ManagedAnsible.extra_packages',
+ new_callable=mock.PropertyMock)
+ def test_extra_packages(self, mock_extra_packages):
+ mock_extra_packages.return_value = [self.test_package]
+ ansible_manager = self.executor_server.ansible_manager
+ self.assertFalse(ansible_manager.validate())
+ ansible_manager.install()
+ self.assertTrue(ansible_manager.validate())
diff --git a/tests/unit/test_gerrit.py b/tests/unit/test_gerrit.py
index 5136c8ffe..7b4d874ff 100644
--- a/tests/unit/test_gerrit.py
+++ b/tests/unit/test_gerrit.py
@@ -246,9 +246,22 @@ class TestFileComments(AnsibleZuulTestCase):
'SUCCESS')
self.assertEqual(self.getJobFromHistory('file-comments-error').result,
'SUCCESS')
- self.assertEqual(len(A.comments), 3)
+ self.assertEqual(len(A.comments), 4)
comments = sorted(A.comments, key=lambda x: x['line'])
- self.assertEqual(comments[0],
+ self.assertEqual(
+ comments[0],
+ {
+ "file": "path/to/file.py",
+ "line": 2,
+ "message": "levels are ignored by gerrit",
+ "reviewer": {
+ "email": "zuul@example.com",
+ "name": "Zuul",
+ "username": "jenkins",
+ },
+ },
+ )
+ self.assertEqual(comments[1],
{'file': 'otherfile.txt',
'line': 21,
'message': 'This is a much longer message.\n\n'
@@ -257,7 +270,7 @@ class TestFileComments(AnsibleZuulTestCase):
'name': 'Zuul',
'username': 'jenkins'}}
)
- self.assertEqual(comments[1],
+ self.assertEqual(comments[2],
{'file': 'path/to/file.py',
'line': 42,
'message': 'line too long',
@@ -265,7 +278,7 @@ class TestFileComments(AnsibleZuulTestCase):
'name': 'Zuul',
'username': 'jenkins'}}
)
- self.assertEqual(comments[2],
+ self.assertEqual(comments[3],
{'file': 'path/to/file.py',
'line': 82,
'message': 'line too short',
@@ -345,7 +358,7 @@ class TestChecksApi(ZuulTestCase):
self.fake_gerrit.addFakeChecker(uuid='zuul_check:abcd',
repository='org/project',
status='ENABLED')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
diff --git a/tests/unit/test_gerrit_crd.py b/tests/unit/test_gerrit_crd.py
index 2e8d6323a..087a583c6 100644
--- a/tests/unit/test_gerrit_crd.py
+++ b/tests/unit/test_gerrit_crd.py
@@ -425,7 +425,7 @@ class TestGerritCRD(ZuulTestCase):
self.assertEqual(B.reported, 0)
self.assertEqual(self.history[0].changes, '2,1 1,1')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
def test_crd_check_git_depends(self):
@@ -453,7 +453,7 @@ class TestGerritCRD(ZuulTestCase):
self.assertEqual(self.history[0].changes, '1,1')
self.assertEqual(self.history[-1].changes, '1,1 2,1')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
@@ -464,7 +464,7 @@ class TestGerritCRD(ZuulTestCase):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two git-dependent changes...
@@ -516,11 +516,11 @@ class TestGerritCRD(ZuulTestCase):
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Make sure the items still share a change queue, and the
# first one is not live.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 1)
queue = tenant.layout.pipelines['check'].queues[0]
first_item = queue.queue[0]
@@ -573,7 +573,7 @@ class TestGerritCRD(ZuulTestCase):
# Make sure none of the items share a change queue, and all
# are live.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
self.assertEqual(len(check_pipeline.queues), 3)
self.assertEqual(len(check_pipeline.getAllItems()), 3)
@@ -767,7 +767,7 @@ class TestGerritCRDAltBaseUrl(ZuulTestCase):
self.assertEqual(B.reported, 0)
self.assertEqual(self.history[0].changes, '2,1 1,1')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
diff --git a/tests/unit/test_gerrit_legacy_crd.py b/tests/unit/test_gerrit_legacy_crd.py
index 1ae2f862b..1a4fdd69e 100644
--- a/tests/unit/test_gerrit_legacy_crd.py
+++ b/tests/unit/test_gerrit_legacy_crd.py
@@ -354,7 +354,7 @@ class TestGerritLegacyCRD(ZuulTestCase):
self.assertEqual(B.reported, 0)
self.assertEqual(self.history[0].changes, '2,1 1,1')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
def test_crd_check_git_depends(self):
@@ -382,7 +382,7 @@ class TestGerritLegacyCRD(ZuulTestCase):
self.assertEqual(self.history[0].changes, '1,1')
self.assertEqual(self.history[-1].changes, '1,1 2,1')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
@@ -393,7 +393,7 @@ class TestGerritLegacyCRD(ZuulTestCase):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two git-dependent changes...
@@ -445,11 +445,11 @@ class TestGerritLegacyCRD(ZuulTestCase):
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Make sure the items still share a change queue, and the
# first one is not live.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 1)
queue = tenant.layout.pipelines['check'].queues[0]
first_item = queue.queue[0]
@@ -502,7 +502,7 @@ class TestGerritLegacyCRD(ZuulTestCase):
# Make sure none of the items share a change queue, and all
# are live.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
self.assertEqual(len(check_pipeline.queues), 3)
self.assertEqual(len(check_pipeline.getAllItems()), 3)
diff --git a/tests/unit/test_git_driver.py b/tests/unit/test_git_driver.py
index cc0444757..06e2ac7c8 100644
--- a/tests/unit/test_git_driver.py
+++ b/tests/unit/test_git_driver.py
@@ -26,8 +26,8 @@ class TestGitDriver(ZuulTestCase):
def setUp(self):
super(TestGitDriver, self).setUp()
- self.git_connection = self.sched.connections.getSource('git').\
- connection
+ self.git_connection = self.scheds.first.sched.connections\
+ .getSource('git').connection
def setup_config(self, config_file: str):
config = super(TestGitDriver, self).setup_config(config_file)
@@ -35,7 +35,7 @@ class TestGitDriver(ZuulTestCase):
return config
def test_basic(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# Check that we have the git source for common-config and the
# gerrit source for the project.
self.assertEqual('git', tenant.config_projects[0].source.name)
@@ -91,8 +91,8 @@ class TestGitDriver(ZuulTestCase):
# Let's stop the git Watcher to let us merge some changes commits
# We want to verify that config changes are detected for commits
# on the range oldrev..newrev
- self.sched.connections.getSource('git').connection.\
- watcher_thread._pause = True
+ self.scheds.first.sched.connections.getSource('git').connection\
+ .watcher_thread._pause = True
# Add a config change
change = {
'name': 'org/project',
@@ -114,8 +114,8 @@ class TestGitDriver(ZuulTestCase):
'common-config', 'Adding f2',
{'f2': "Content"})
# Restart the git watcher
- self.sched.connections.getSource('git').connection.\
- watcher_thread._pause = False
+ self.scheds.first.sched.connections.getSource('git').connection\
+ .watcher_thread._pause = False
# Wait for the tenant reconfiguration to happen
self.waitForEvent(count)
diff --git a/tests/unit/test_github_crd.py b/tests/unit/test_github_crd.py
index 197331b68..13b59ccf7 100644
--- a/tests/unit/test_github_crd.py
+++ b/tests/unit/test_github_crd.py
@@ -48,7 +48,7 @@ class TestGithubCrossRepoDeps(ZuulTestCase):
B.head_sha))
# There should be no more changes in the queue
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
@simple_layout('layouts/crd-github.yaml', driver='github')
diff --git a/tests/unit/test_github_driver.py b/tests/unit/test_github_driver.py
index 8baf60f46..0558c9994 100644
--- a/tests/unit/test_github_driver.py
+++ b/tests/unit/test_github_driver.py
@@ -147,7 +147,7 @@ class TestGithubDriver(ZuulTestCase):
self.waitUntilSettled()
# Trigger reconfig to enforce a reenqueue of the item
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Now we can release all jobs
@@ -209,11 +209,27 @@ class TestGithubDriver(ZuulTestCase):
sha = tag.commit.hexsha
del repo
+ # Notify zuul about the new branch to load the config
+ self.fake_github.emitEvent(
+ self.fake_github.getPushEvent(
+ 'org/project',
+ ref='refs/heads/%s' % 'tagbranch'))
+ self.waitUntilSettled()
+
+ # Record previous tenant reconfiguration time
+ before = self.scheds.first.sched.tenant_last_reconfigured.get(
+ 'tenant-one', 0)
+
self.fake_github.emitEvent(
self.fake_github.getPushEvent('org/project', 'refs/tags/newtag',
new_rev=sha))
self.waitUntilSettled()
+ # Make sure the tenant hasn't been reconfigured due to the new tag
+ after = self.scheds.first.sched.tenant_last_reconfigured.get(
+ 'tenant-one', 0)
+ self.assertEqual(before, after)
+
build_params = self.builds[0].parameters
self.assertEqual('refs/tags/newtag', build_params['zuul']['ref'])
self.assertFalse('oldrev' in build_params['zuul'])
@@ -324,7 +340,7 @@ class TestGithubDriver(ZuulTestCase):
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('org/common-config',
'layouts/timer-github.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
time.sleep(2)
self.waitUntilSettled()
self.assertEqual(len(self.builds), 1)
@@ -333,7 +349,7 @@ class TestGithubDriver(ZuulTestCase):
# below don't race against more jobs being queued.
self.commitConfigUpdate('org/common-config',
'layouts/basic-github.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
@@ -397,7 +413,7 @@ class TestGithubDriver(ZuulTestCase):
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_git_https_url(self):
"""Test that git_ssh option gives git url with ssh"""
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project')
url = self.fake_github.real_getGitUrl(project)
@@ -406,7 +422,7 @@ class TestGithubDriver(ZuulTestCase):
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_git_ssh_url(self):
"""Test that git_ssh option gives git url with ssh"""
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project')
url = self.fake_github_ssh.real_getGitUrl(project)
@@ -415,7 +431,7 @@ class TestGithubDriver(ZuulTestCase):
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_git_enterprise_url(self):
"""Test that git_url option gives git url with proper host"""
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
_, project = tenant.getProject('org/project')
url = self.fake_github_ent.real_getGitUrl(project)
@@ -898,7 +914,8 @@ class TestGithubDriver(ZuulTestCase):
modified_files=modified_files)
# record previous tenant reconfiguration time, which may not be set
- old = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ old = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
self.waitUntilSettled()
if expected_cat_jobs is not None:
@@ -908,7 +925,8 @@ class TestGithubDriver(ZuulTestCase):
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
- new = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ new = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
if expect_reconfigure:
# New timestamp should be greater than the old timestamp
@@ -1086,7 +1104,7 @@ class TestGithubDriver(ZuulTestCase):
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
self.assertEqual(check_pipeline.getAllItems(), [])
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 2)
@@ -1222,7 +1240,8 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
tenant_config_file = 'config/unprotected-branches/main.yaml'
def test_unprotected_branches(self):
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants\
+ .get('tenant-one')
project1 = tenant.untrusted_projects[0]
project2 = tenant.untrusted_projects[1]
@@ -1240,10 +1259,10 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
github = self.fake_github.getGithubClient()
repo = github.repo_from_project('org/project2')
repo._set_branch_protection('master', True)
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
tpc1 = tenant.project_configs[project1.canonical_name]
tpc2 = tenant.project_configs[project2.canonical_name]
@@ -1270,7 +1289,7 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
self.create_branch('org/project3', 'stable')
repo._set_branch_protection('stable', True)
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_github.openFakePullRequest('org/project3', 'stable', 'A')
@@ -1305,7 +1324,7 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
repo = github.repo_from_project('org/project1')
self.create_branch('org/project1', 'feat-x')
repo._set_branch_protection('master', True)
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_github.openFakePullRequest('org/project1', 'master', 'A')
@@ -1338,12 +1357,14 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
modified_files=['zuul.yaml'])
# record previous tenant reconfiguration time, which may not be set
- old = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ old = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
self.waitUntilSettled()
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
- new = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ new = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
# We don't expect a reconfiguration because the push was to an
# unprotected branch
@@ -1356,7 +1377,8 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
- new = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ new = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
# We now expect that zuul reconfigured itself
self.assertLess(old, new)
@@ -1376,11 +1398,12 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
# deleted.
repo._create_branch('feat-x')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# record previous tenant reconfiguration time, which may not be set
- old = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ old = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
self.waitUntilSettled()
# Delete the branch
@@ -1393,7 +1416,8 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
self.fake_github.emitEvent(pevent)
self.waitUntilSettled()
- new = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ new = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
# We now expect that zuul reconfigured itself as we deleted a protected
# branch
@@ -1411,7 +1435,7 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
repo._create_branch('release')
repo._create_branch('feature')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
repo._set_branch_protection('release', True)
@@ -1786,7 +1810,7 @@ class TestCheckRunAnnotations(ZuulGithubAppTestCase, AnsibleZuulTestCase):
)
annotations = check_run["output"]["annotations"]
- self.assertEqual(4, len(annotations))
+ self.assertEqual(6, len(annotations))
self.assertEqual(annotations[0], {
"path": "README",
@@ -1796,19 +1820,27 @@ class TestCheckRunAnnotations(ZuulGithubAppTestCase, AnsibleZuulTestCase):
"end_line": 1,
})
- # As the columns are not part of the same line, they are ignored in the
- # annotation. Otherwise Github will complain about the request.
self.assertEqual(annotations[1], {
"path": "README",
"annotation_level": "warning",
+ "message": "Line annotation with level",
+ "start_line": 2,
+ "end_line": 2,
+ })
+
+ # As the columns are not part of the same line, they are ignored in the
+ # annotation. Otherwise Github will complain about the request.
+ self.assertEqual(annotations[2], {
+ "path": "README",
+ "annotation_level": "notice",
"message": "simple range annotation",
"start_line": 4,
"end_line": 6,
})
- self.assertEqual(annotations[2], {
+ self.assertEqual(annotations[3], {
"path": "README",
- "annotation_level": "warning",
+ "annotation_level": "failure",
"message": "Columns must be part of the same line",
"start_line": 7,
"end_line": 7,
@@ -1819,10 +1851,18 @@ class TestCheckRunAnnotations(ZuulGithubAppTestCase, AnsibleZuulTestCase):
# From the invalid/error file comments, only the "line out of file"
# should remain. All others are excluded as they would result in
# invalid Github requests, making the whole check run update fail.
- self.assertEqual(annotations[3], {
+ self.assertEqual(annotations[4], {
"path": "README",
"annotation_level": "warning",
"message": "Line is not part of the file",
"end_line": 9999,
"start_line": 9999
})
+
+ self.assertEqual(annotations[5], {
+ "path": "README",
+ "annotation_level": "warning",
+ "message": "Invalid level will fall back to warning",
+ "start_line": 3,
+ "end_line": 3,
+ })
diff --git a/tests/unit/test_github_requirements.py b/tests/unit/test_github_requirements.py
index 461df55da..e542f80f9 100644
--- a/tests/unit/test_github_requirements.py
+++ b/tests/unit/test_github_requirements.py
@@ -14,7 +14,7 @@
import time
-from tests.base import ZuulTestCase, simple_layout
+from tests.base import ZuulGithubAppTestCase, ZuulTestCase, simple_layout
class TestGithubRequirements(ZuulTestCase):
@@ -588,3 +588,46 @@ class TestGithubRequirements(ZuulTestCase):
self.waitUntilSettled()
self.assertEqual(len(self.history), 2)
self.assertEqual(self.history[1].name, 'project12-status')
+
+
+class TestGithubAppRequirements(ZuulGithubAppTestCase):
+ """Test pipeline and trigger requirements with app authentication"""
+ config_file = 'zuul-github-driver.conf'
+
+ @simple_layout("layouts/requirements-github.yaml", driver="github")
+ def test_pipeline_require_check_run(self):
+ "Test pipeline requirement: status (reported via a check run)"
+ project = "org/project16"
+ github = self.fake_github.getGithubClient()
+ repo = github.repo_from_project(project)
+
+ A = self.fake_github.openFakePullRequest(project, "master", "A")
+ # A comment event that we will keep submitting to trigger
+ comment = A.getCommentAddedEvent("trigger me")
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+
+ # No status from zuul, so nothing should be enqueued
+ self.assertEqual(len(self.history), 0)
+
+ # An error check run should also not cause it to be enqueued
+ repo.create_check_run(
+ A.head_sha,
+ "tenant-one/check",
+ conclusion="failure",
+ app="check-run",
+ )
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ # A success check run goes in, ready to be enqueued
+ repo.create_check_run(
+ A.head_sha,
+ "tenant-one/check",
+ conclusion="success",
+ app="check-run",
+ )
+ self.fake_github.emitEvent(comment)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 1)
diff --git a/tests/unit/test_inventory.py b/tests/unit/test_inventory.py
index ec3db9752..af45aebb7 100644
--- a/tests/unit/test_inventory.py
+++ b/tests/unit/test_inventory.py
@@ -70,9 +70,11 @@ class TestInventoryBase(ZuulTestCase):
self.waitUntilSettled()
def cancelExecutorJobs(self):
- builds = [b for b in self.executor_client.builds.values()]
- for build in builds:
- self.executor_client.cancelJobInQueue(build)
+ for app in self.scheds:
+ executor_client = app.sched.executor
+ builds = [b for b in executor_client.builds.values()]
+ for build in builds:
+ executor_client.cancelJobInQueue(build)
class TestInventoryGithub(TestInventoryBase):
diff --git a/tests/unit/test_lib_ansible.py b/tests/unit/test_lib_ansible.py
index 2d35f0658..808719521 100644
--- a/tests/unit/test_lib_ansible.py
+++ b/tests/unit/test_lib_ansible.py
@@ -13,7 +13,6 @@
# under the License.
import collections
-import subprocess
from unittest import mock
from tests.base import BaseTestCase
@@ -23,8 +22,11 @@ from zuul.lib.ansible import AnsibleManager
class TestLibAnsibleManager(BaseTestCase):
@mock.patch('zuul.lib.ansible.AnsibleManager.load_ansible_config')
- @mock.patch('zuul.lib.ansible.AnsibleManager.getAnsibleCommand')
- def test_validate_remembers_failures(self, getAnsibleCommand, _):
+ @mock.patch('zuul.lib.ansible.AnsibleManager._validate_packages')
+ @mock.patch('zuul.lib.ansible.AnsibleManager._validate_ansible')
+ def test_validate_remembers_failures(self,
+ mock_validate_ansible,
+ mock_validate_packages, _):
okish = mock.Mock(
'subprocess.CompletedProcess',
@@ -33,17 +35,22 @@ class TestLibAnsibleManager(BaseTestCase):
am = AnsibleManager()
am._supported_versions = collections.OrderedDict([
- ('1.0', subprocess.CalledProcessError(1, 'fake failure')),
- ('2.8', okish),
+ ('1.0', False),
+ ('2.8', True),
])
- with mock.patch('subprocess.run') as ansible:
- ansible.side_effect = am._supported_versions.values()
- self.assertFalse(
- am.validate(),
- 'A valid ansible should not mask a previous failure')
+ mock_validate_packages.side_effect = am._supported_versions.values()
+ mock_validate_ansible.side_effect = am._supported_versions.values()
+ self.assertFalse(
+ am.validate(),
+ 'A valid ansible should not mask a previous failure')
+
self.assertEquals(
- [mock.call('1.0', 'ansible'),
- mock.call('2.8', 'ansible'),
+ [mock.call('1.0'),
+ mock.call('2.8')
],
- getAnsibleCommand.mock_calls)
+ mock_validate_ansible.mock_calls)
+
+ self.assertEquals(
+ [mock.call('2.8')],
+ mock_validate_packages.mock_calls)
diff --git a/tests/unit/test_merger_repo.py b/tests/unit/test_merger_repo.py
index 082274a3c..6859f15a2 100644
--- a/tests/unit/test_merger_repo.py
+++ b/tests/unit/test_merger_repo.py
@@ -76,6 +76,55 @@ class TestMergerRepo(ZuulTestCase):
sub_repo.createRepoObject(None).remotes[0].url,
message="Sub repository points to upstream project2")
+ def test_repo_reset_branch_conflict(self):
+ """Test correct reset with conflicting branch names"""
+ parent_path = os.path.join(self.upstream_root, 'org/project1')
+
+ parent_repo = git.Repo(parent_path)
+ parent_repo.create_head("foobar")
+
+ work_repo = Repo(parent_path, self.workspace_root,
+ 'none@example.org', 'User Name', '0', '0')
+
+ # Checkout branch that will be deleted from the remote repo
+ work_repo.checkout("foobar")
+
+ # Delete remote branch and create a branch that conflicts with
+ # the branch checked out locally.
+ parent_repo.delete_head("foobar")
+ parent_repo.create_head("foobar/sub")
+
+ work_repo.reset()
+ work_repo.checkout("foobar/sub")
+
+ # Try the reverse conflict
+ parent_path = os.path.join(self.upstream_root, 'org/project2')
+
+ parent_repo = git.Repo(parent_path)
+ parent_repo.create_head("foobar/sub")
+
+ work_repo = Repo(parent_path, self.workspace_root,
+ 'none@example.org', 'User Name', '0', '0')
+
+ # Checkout branch that will be deleted from the remote repo
+ work_repo.checkout("foobar/sub")
+
+ # Delete remote branch and create a branch that conflicts with
+ # the branch checked out locally.
+ parent_repo.delete_head("foobar/sub")
+
+    # Note: Before git 2.13 deleting a ref foo/bar leaves an empty
+ # directory foo behind that will block creating the reference foo
+ # in the future. As a workaround we must clean up empty directories
+ # in .git/refs.
+ if parent_repo.git.version_info[:2] < (2, 13):
+ Repo._cleanup_leaked_ref_dirs(parent_path, None, [])
+
+ parent_repo.create_head("foobar")
+
+ work_repo.reset()
+ work_repo.checkout("foobar")
+
def test_set_refs(self):
parent_path = os.path.join(self.upstream_root, 'org/project1')
remote_sha = self.create_commit('org/project1')
diff --git a/tests/unit/test_pagure_driver.py b/tests/unit/test_pagure_driver.py
index 4b7aa524a..0393a13d0 100644
--- a/tests/unit/test_pagure_driver.py
+++ b/tests/unit/test_pagure_driver.py
@@ -61,9 +61,8 @@ class TestPagureDriver(ZuulTestCase):
self.assertThat(
A.comments[1]['comment'],
MatchesRegex(r'.*\[project-test2 \]\(.*\).*', re.DOTALL))
- self.assertEqual(2, len(A.flags))
+ self.assertEqual(1, len(A.flags))
self.assertEqual('success', A.flags[0]['status'])
- self.assertEqual('pending', A.flags[1]['status'])
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_pull_request_updated(self):
@@ -214,10 +213,12 @@ class TestPagureDriver(ZuulTestCase):
newrev = repo.commit('refs/heads/stable-1.0').hexsha
event = self.fake_pagure.getGitBranchEvent(
'org/project', 'stable-1.0', 'creation', newrev)
- old = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ old = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
- new = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ new = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
self.assertEqual(1, len(self.history))
@@ -245,7 +246,8 @@ class TestPagureDriver(ZuulTestCase):
def test_ref_updated_and_tenant_reconfigure(self):
self.waitUntilSettled()
- old = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ old = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
zuul_yaml = [
{'job': {
@@ -271,7 +273,8 @@ class TestPagureDriver(ZuulTestCase):
self.fake_pagure.emitEvent(event)
self.waitUntilSettled()
- new = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ new = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
# New timestamp should be greater than the old timestamp
self.assertLess(old, new)
@@ -325,7 +328,7 @@ class TestPagureDriver(ZuulTestCase):
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
self.assertEqual(check_pipeline.getAllItems(), [])
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 2)
@@ -489,6 +492,31 @@ class TestPagureDriver(ZuulTestCase):
self.assertEqual(1, len(self.history))
@simple_layout('layouts/requirements-pagure.yaml', driver='pagure')
+ def test_flag_require(self):
+
+ A = self.fake_pagure.openFakePullRequest(
+ 'org/project7', 'master', 'A')
+
+ # CI status from other CIs must not be handled
+ self.fake_pagure.emitEvent(
+ A.getPullRequestStatusSetEvent("success", username="notzuul"))
+ self.waitUntilSettled()
+ self.assertEqual(0, len(self.history))
+ self.assertEqual(1, len(A.flags))
+
+ self.fake_pagure.emitEvent(
+ A.getPullRequestStatusSetEvent("failure"))
+ self.waitUntilSettled()
+ self.assertEqual(0, len(self.history))
+ self.assertEqual(2, len(A.flags))
+
+ self.fake_pagure.emitEvent(
+ A.getPullRequestStatusSetEvent("success"))
+ self.waitUntilSettled()
+ self.assertEqual(1, len(self.history))
+ self.assertEqual(2, len(A.flags))
+
+ @simple_layout('layouts/requirements-pagure.yaml', driver='pagure')
def test_pull_request_closed(self):
A = self.fake_pagure.openFakePullRequest(
@@ -587,7 +615,7 @@ class TestPagureDriver(ZuulTestCase):
B.commit_stop))
# There should be no more changes in the queue
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
@simple_layout('layouts/crd-pagure.yaml', driver='pagure')
@@ -992,7 +1020,6 @@ class TestPagureWebhook(ZuulTestCase):
def setUp(self):
super(TestPagureWebhook, self).setUp()
-
# Start the web server
self.web = self.useFixture(
ZuulWebFixture(self.gearman_server.port,
@@ -1010,9 +1037,6 @@ class TestPagureWebhook(ZuulTestCase):
self.fake_pagure.setZuulWebPort(port)
- def tearDown(self):
- super(TestPagureWebhook, self).tearDown()
-
@simple_layout('layouts/basic-pagure.yaml', driver='pagure')
def test_webhook(self):
@@ -1020,6 +1044,13 @@ class TestPagureWebhook(ZuulTestCase):
'org/project', 'master', 'A')
self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent(),
use_zuulweb=True,
+ project='org/project',
+ wrong_token=True)
+ self.waitUntilSettled()
+ self.assertEqual(len(self.history), 0)
+
+ self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent(),
+ use_zuulweb=True,
project='org/project')
self.waitUntilSettled()
@@ -1029,38 +1060,40 @@ class TestPagureWebhook(ZuulTestCase):
self.getJobFromHistory('project-test2').result)
-class TestPagureProjectConnector(ZuulTestCase):
- config_file = 'zuul-pagure-driver.conf'
+class TestPagureWebhookWhitelist(ZuulTestCase):
+ config_file = 'zuul-pagure-driver-whitelist.conf'
- @simple_layout('layouts/basic-pagure.yaml', driver='pagure')
- def test_connectors(self):
+ def setUp(self):
+ super(TestPagureWebhookWhitelist, self).setUp()
+ # Start the web server
+ self.web = self.useFixture(
+ ZuulWebFixture(self.gearman_server.port,
+ self.config, self.test_root))
- project_api_token_exp_date = self.fake_pagure.connectors[
- 'org/project']['api_client'].token_exp_date
+ host = '127.0.0.1'
+ # Wait until web server is started
+ while True:
+ port = self.web.port
+ try:
+ with socket.create_connection((host, port)):
+ break
+ except ConnectionRefusedError:
+ pass
- A = self.fake_pagure.openFakePullRequest(
- 'org/project', 'master', 'A')
- self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent())
- self.waitUntilSettled()
+ self.fake_pagure.setZuulWebPort(port)
- self.assertEqual(
- project_api_token_exp_date,
- self.fake_pagure.connectors[
- 'org/project']['api_client'].token_exp_date)
-
- # Now force a POST error with EINVALIDTOK code and check
- # The connector has been refreshed
- self.fake_pagure.connectors[
- 'org/project']['api_client'].return_post_error = {
- 'error': 'Invalid or expired token',
- 'error_code': 'EINVALIDTOK'
- }
+ @simple_layout('layouts/basic-pagure.yaml', driver='pagure')
+ def test_webhook_whitelist(self):
- self.fake_pagure.emitEvent(A.getPullRequestUpdatedEvent())
+ A = self.fake_pagure.openFakePullRequest(
+ 'org/project', 'master', 'A')
+ self.fake_pagure.emitEvent(A.getPullRequestOpenedEvent(),
+ use_zuulweb=True,
+ project='org/project',
+ wrong_token=True)
self.waitUntilSettled()
- # Expiry date changed meaning the token has been refreshed
- self.assertNotEqual(
- project_api_token_exp_date,
- self.fake_pagure.connectors[
- 'org/project']['api_client'].token_exp_date)
+ self.assertEqual('SUCCESS',
+ self.getJobFromHistory('project-test1').result)
+ self.assertEqual('SUCCESS',
+ self.getJobFromHistory('project-test2').result)
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index 97581df01..7b5dec082 100644
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -26,6 +26,7 @@ from kazoo.exceptions import NoNodeError
import git
import testtools
+from zuul.scheduler import Scheduler
import zuul.change_matcher
from zuul.driver.gerrit import gerritreporter
@@ -189,9 +190,9 @@ class TestSchedulerAutoholdHoldExpiration(ZuulTestCase):
self.assertTrue(r)
# There should be a record in ZooKeeper
- request_list = self.zk.getHoldRequests()
+ request_list = self.scheds.first.sched.zk.getHoldRequests()
self.assertEqual(1, len(request_list))
- request = self.zk.getHoldRequest(request_list[0])
+ request = self.scheds.first.sched.zk.getHoldRequest(request_list[0])
self.assertIsNotNone(request)
self.assertEqual('tenant-one', request.tenant)
self.assertEqual('review.example.com/org/project', request.project)
@@ -219,9 +220,9 @@ class TestSchedulerAutoholdHoldExpiration(ZuulTestCase):
self.assertTrue(r)
# There should be a record in ZooKeeper
- request_list = self.zk.getHoldRequests()
+ request_list = self.scheds.first.sched.zk.getHoldRequests()
self.assertEqual(1, len(request_list))
- request = self.zk.getHoldRequest(request_list[0])
+ request = self.scheds.first.sched.zk.getHoldRequest(request_list[0])
self.assertIsNotNone(request)
self.assertEqual('tenant-one', request.tenant)
self.assertEqual('review.example.com/org/project', request.project)
@@ -250,9 +251,9 @@ class TestSchedulerAutoholdHoldExpiration(ZuulTestCase):
self.assertTrue(r)
# There should be a record in ZooKeeper
- request_list = self.zk.getHoldRequests()
+ request_list = self.scheds.first.sched.zk.getHoldRequests()
self.assertEqual(1, len(request_list))
- request = self.zk.getHoldRequest(request_list[0])
+ request = self.scheds.first.sched.zk.getHoldRequest(request_list[0])
self.assertIsNotNone(request)
self.assertEqual('tenant-one', request.tenant)
self.assertEqual('review.example.com/org/project', request.project)
@@ -797,7 +798,7 @@ class TestScheduler(ZuulTestCase):
# project-test1 and project-test2 for C
self.assertEqual(len(self.builds), 5)
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
builds = items[0].current_build_set.getBuilds()
self.assertEqual(self.countJobResults(builds, 'SUCCESS'), 1)
@@ -908,7 +909,8 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
time.sleep(2)
- data = json.loads(self.sched.formatStatusJSON('tenant-one'))
+ data = json.loads(self.scheds.first.sched
+ .formatStatusJSON('tenant-one'))
found_job = None
for pipeline in data['pipelines']:
if pipeline['name'] != 'gate':
@@ -1224,7 +1226,7 @@ class TestScheduler(ZuulTestCase):
"Test whether a change is ready to merge"
# TODO: move to test_gerrit (this is a unit test!)
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project) = tenant.getProject('org/project')
source = project.source
@@ -1760,9 +1762,9 @@ class TestScheduler(ZuulTestCase):
self.assertTrue(r)
# There should be a record in ZooKeeper
- request_list = self.zk.getHoldRequests()
+ request_list = self.scheds.first.sched.zk.getHoldRequests()
self.assertEqual(1, len(request_list))
- request = self.zk.getHoldRequest(request_list[0])
+ request = self.scheds.first.sched.zk.getHoldRequest(request_list[0])
self.assertIsNotNone(request)
self.assertEqual('tenant-one', request.tenant)
self.assertEqual('review.example.com/org/project', request.project)
@@ -1821,7 +1823,7 @@ class TestScheduler(ZuulTestCase):
# The hold request current_count should have incremented
# and we should have recorded the held node ID.
- request2 = self.zk.getHoldRequest(request.id)
+ request2 = self.scheds.first.sched.zk.getHoldRequest(request.id)
self.assertEqual(request.current_count + 1, request2.current_count)
self.assertEqual(1, len(request2.nodes))
self.assertEqual(1, len(request2.nodes[0]["nodes"]))
@@ -1843,11 +1845,11 @@ class TestScheduler(ZuulTestCase):
self.assertEqual(held_nodes, 1)
# request current_count should not have changed
- request3 = self.zk.getHoldRequest(request2.id)
+ request3 = self.scheds.first.sched.zk.getHoldRequest(request2.id)
self.assertEqual(request2.current_count, request3.current_count)
# Deleting hold request should set held nodes to used
- self.zk.deleteHoldRequest(request3)
+ self.scheds.first.sched.zk.deleteHoldRequest(request3)
node_states = [n['state'] for n in self.fake_nodepool.getNodes()]
self.assertEqual(3, len(node_states))
self.assertEqual([zuul.model.STATE_USED] * 3, node_states)
@@ -1867,9 +1869,9 @@ class TestScheduler(ZuulTestCase):
self.assertTrue(r)
# There should be a record in ZooKeeper
- request_list = self.zk.getHoldRequests()
+ request_list = self.scheds.first.sched.zk.getHoldRequests()
self.assertEqual(1, len(request_list))
- request = self.zk.getHoldRequest(request_list[0])
+ request = self.scheds.first.sched.zk.getHoldRequest(request_list[0])
self.assertIsNotNone(request)
request = client.autohold_info(request.id)
@@ -1891,14 +1893,14 @@ class TestScheduler(ZuulTestCase):
self.assertTrue(r)
# There should be a record in ZooKeeper
- request_list = self.zk.getHoldRequests()
+ request_list = self.scheds.first.sched.zk.getHoldRequests()
self.assertEqual(1, len(request_list))
- request = self.zk.getHoldRequest(request_list[0])
+ request = self.scheds.first.sched.zk.getHoldRequest(request_list[0])
self.assertIsNotNone(request)
# Delete and verify no more requests
self.assertTrue(client.autohold_delete(request.id))
- request_list = self.zk.getHoldRequests()
+ request_list = self.scheds.first.sched.zk.getHoldRequests()
self.assertEqual([], request_list)
def _test_autohold_scoped(self, change_obj, change, ref):
@@ -2137,10 +2139,10 @@ class TestScheduler(ZuulTestCase):
@simple_layout('layouts/autohold.yaml')
def test_autohold_request_expiration(self):
- orig_exp = self.sched.EXPIRED_HOLD_REQUEST_TTL
+ orig_exp = Scheduler.EXPIRED_HOLD_REQUEST_TTL
def reset_exp():
- self.sched.EXPIRED_HOLD_REQUEST_TTL = orig_exp
+ self.scheds.first.sched.EXPIRED_HOLD_REQUEST_TTL = orig_exp
self.addCleanup(reset_exp)
@@ -2171,7 +2173,7 @@ class TestScheduler(ZuulTestCase):
# Temporarily shorten hold time so that the hold request can be
# auto-deleted (which is done on another test failure). And wait
# long enough for nodes to expire and request to delete.
- self.sched.EXPIRED_HOLD_REQUEST_TTL = 1
+ self.scheds.first.sched.EXPIRED_HOLD_REQUEST_TTL = 1
time.sleep(3)
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
@@ -2190,7 +2192,7 @@ class TestScheduler(ZuulTestCase):
self.wait_timeout = 120
"test that dependent changes behind dequeued changes work"
# This complicated test is a reproduction of a real life bug
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
@@ -2317,7 +2319,7 @@ class TestScheduler(ZuulTestCase):
"Test that the merger works with large changes after a repack"
        # https://bugs.launchpad.net/zuul/+bug/1078946
# This test assumes the repo is already cloned; make sure it is
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
trusted, project = tenant.getProject('org/project')
url = self.fake_gerrit.getGitUrl(project)
self.executor_server.merger._addProject('review.example.com',
@@ -2399,7 +2401,7 @@ class TestScheduler(ZuulTestCase):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two git-dependent changes
@@ -2520,7 +2522,7 @@ class TestScheduler(ZuulTestCase):
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
# Add two git-dependent changes
@@ -2603,7 +2605,7 @@ class TestScheduler(ZuulTestCase):
self.executor_server.hold_jobs_in_build = True
# Start timer trigger - also org/project
self.commitConfigUpdate('common-config', 'layouts/idle.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# The pipeline triggers every second, so we should have seen
# several by now.
time.sleep(5)
@@ -2612,7 +2614,7 @@ class TestScheduler(ZuulTestCase):
# below don't race against more jobs being queued.
# Must be in same repo, so overwrite config with another one
self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
@@ -2746,7 +2748,7 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
self.assertEqual(len(self.gearman_server.getQueue()), 0)
- self.assertTrue(self.sched._areAllBuildsComplete())
+ self.assertTrue(self.scheds.first.sched._areAllBuildsComplete())
self.assertEqual(len(self.history), 0)
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(A.reported, 2)
@@ -2763,7 +2765,7 @@ class TestScheduler(ZuulTestCase):
self.assertEqual(A.reported, False)
# Check queue is empty afterwards
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 0)
@@ -2859,7 +2861,7 @@ class TestScheduler(ZuulTestCase):
def test_statsd(self):
"Test each of the statsd methods used in the scheduler"
- statsd = self.sched.statsd
+ statsd = self.scheds.first.sched.statsd
statsd.incr('test-incr')
statsd.timing('test-timing', 3)
statsd.gauge('test-gauge', 12)
@@ -2878,7 +2880,7 @@ class TestScheduler(ZuulTestCase):
self.assertReportedStat('hostname-gauge.1_2_3_4.1_2', '12', 'g')
def test_statsd_conflict(self):
- statsd = self.sched.statsd
+ statsd = self.scheds.first.sched.statsd
statsd.gauge('test-gauge', 12)
# since test-gauge is already a value, we can't make
# subvalues. Test the assert works.
@@ -2900,14 +2902,14 @@ class TestScheduler(ZuulTestCase):
self.assertEqual(len(self.gearman_server.getQueue()), 1)
self.commitConfigUpdate('common-config', 'layouts/no-jobs.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.gearman_server.release('gate-noop')
self.waitUntilSettled()
# asserting that project-merge is removed from queue
self.assertEqual(len(self.gearman_server.getQueue()), 0)
- self.assertTrue(self.sched._areAllBuildsComplete())
+ self.assertTrue(self.scheds.first.sched._areAllBuildsComplete())
self.assertEqual(len(self.history), 1)
self.assertEqual(self.history[0].name, 'gate-noop')
@@ -3149,7 +3151,7 @@ class TestScheduler(ZuulTestCase):
def test_queue_names(self):
"Test shared change queue names"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
(trusted, project2) = tenant.getProject('org/project2')
q1 = tenant.layout.pipelines['gate'].getQueue(project1)
@@ -3160,7 +3162,7 @@ class TestScheduler(ZuulTestCase):
@simple_layout("layouts/template-queue.yaml")
def test_template_queue(self):
"Test a shared queue can be constructed from a project-template"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
(trusted, project2) = tenant.getProject('org/project2')
q1 = tenant.layout.pipelines['gate'].getQueue(project1)
@@ -3171,7 +3173,7 @@ class TestScheduler(ZuulTestCase):
@simple_layout("layouts/regex-template-queue.yaml")
def test_regex_template_queue(self):
"Test a shared queue can be constructed from a regex project-template"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
(trusted, project2) = tenant.getProject('org/project2')
q1 = tenant.layout.pipelines['gate'].getQueue(project1)
@@ -3182,7 +3184,7 @@ class TestScheduler(ZuulTestCase):
@simple_layout("layouts/regex-queue.yaml")
def test_regex_queue(self):
"Test a shared queue can be constructed from a regex project"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
(trusted, project1) = tenant.getProject('org/project1')
(trusted, project2) = tenant.getProject('org/project2')
q1 = tenant.layout.pipelines['gate'].getQueue(project1)
@@ -3221,24 +3223,28 @@ class TestScheduler(ZuulTestCase):
def test_reconfigure_merge(self):
"""Test that two reconfigure events are merged"""
- tenant = self.sched.abide.tenants['tenant-one']
+ tenant = self.scheds.first.sched.abide.tenants['tenant-one']
(trusted, project) = tenant.getProject('org/project')
- self.sched.run_handler_lock.acquire()
- self.assertEqual(self.sched.management_event_queue.qsize(), 0)
+ self.scheds.first.sched.run_handler_lock.acquire()
+ self.assertEqual(
+ self.scheds.first.sched.management_event_queue.qsize(), 0)
- self.sched.reconfigureTenant(tenant, project, None)
- self.assertEqual(self.sched.management_event_queue.qsize(), 1)
+ self.scheds.first.sched.reconfigureTenant(tenant, project, None)
+ self.assertEqual(
+ self.scheds.first.sched.management_event_queue.qsize(), 1)
- self.sched.reconfigureTenant(tenant, project, None)
+ self.scheds.first.sched.reconfigureTenant(tenant, project, None)
# The second event should have been combined with the first
# so we should still only have one entry.
- self.assertEqual(self.sched.management_event_queue.qsize(), 1)
+ self.assertEqual(
+ self.scheds.first.sched.management_event_queue.qsize(), 1)
- self.sched.run_handler_lock.release()
+ self.scheds.first.sched.run_handler_lock.release()
self.waitUntilSettled()
- self.assertEqual(self.sched.management_event_queue.qsize(), 0)
+ self.assertEqual(
+ self.scheds.first.sched.management_event_queue.qsize(), 0)
def test_live_reconfiguration(self):
"Test that live reconfiguration works"
@@ -3248,7 +3254,7 @@ class TestScheduler(ZuulTestCase):
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
@@ -3267,7 +3273,8 @@ class TestScheduler(ZuulTestCase):
"Test that live reconfiguration via command socket works"
# record previous tenant reconfiguration time, which may not be set
- old = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ old = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
self.waitUntilSettled()
command_socket = self.config.get('scheduler', 'command_socket')
@@ -3282,7 +3289,8 @@ class TestScheduler(ZuulTestCase):
while True:
if time.time() - start > 15:
raise Exception("Timeout waiting for full reconfiguration")
- new = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ new = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
if old < new:
break
else:
@@ -3297,14 +3305,14 @@ class TestScheduler(ZuulTestCase):
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['gate']
change = pipeline.getAllItems()[0].change
# Set this to an invalid value to cause an exception during
# reconfiguration.
change.branch = None
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
@@ -3353,7 +3361,7 @@ class TestScheduler(ZuulTestCase):
# Add the "project-test3" job.
self.commitConfigUpdate('common-config',
'layouts/live-reconfiguration-add-job.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
@@ -3414,7 +3422,7 @@ class TestScheduler(ZuulTestCase):
# Add the "project-test3" job.
self.commitConfigUpdate('common-config',
'layouts/live-reconfiguration-add-job.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
@@ -3467,7 +3475,7 @@ class TestScheduler(ZuulTestCase):
# Remove the test1 job.
self.commitConfigUpdate('common-config',
'layouts/live-reconfiguration-failed-job.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
@@ -3517,7 +3525,7 @@ class TestScheduler(ZuulTestCase):
self.commitConfigUpdate(
'common-config',
'layouts/live-reconfiguration-shared-queue.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
@@ -3559,7 +3567,7 @@ class TestScheduler(ZuulTestCase):
self.commitConfigUpdate(
'common-config',
'layouts/live-reconfiguration-shared-queue-removed.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
@@ -3592,7 +3600,7 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
# Reconfigure (with only one change in the pipeline).
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Add the child change.
@@ -3602,7 +3610,7 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
# Reconfigure (with both in the pipeline).
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
@@ -3641,7 +3649,7 @@ class TestScheduler(ZuulTestCase):
self.commitConfigUpdate(
'common-config',
'layouts/live-reconfiguration-del-project.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Builds for C aborted, builds for A succeed,
@@ -3665,7 +3673,7 @@ class TestScheduler(ZuulTestCase):
self.assertEqual(B.reported, 0)
self.assertEqual(C.reported, 0)
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(len(tenant.layout.pipelines['check'].queues), 0)
self.assertIn('Build succeeded', A.messages[0])
@@ -3711,7 +3719,7 @@ class TestScheduler(ZuulTestCase):
dict(name='job2', result='ABORTED', changes='1,1 2,1'),
], ordered=False)
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
@@ -3742,7 +3750,7 @@ class TestScheduler(ZuulTestCase):
self.commitConfigUpdate(
'common-config',
'layouts/delayed-repo-init.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
A = self.fake_gerrit.addFakeChange('org/new-project', 'master', 'A')
@@ -3769,7 +3777,8 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
def get_job():
- data = json.loads(self.sched.formatStatusJSON('tenant-one'))
+ data = json.loads(self.scheds.first.sched
+ .formatStatusJSON('tenant-one'))
for pipeline in data['pipelines']:
for queue in pipeline['change_queues']:
for head in queue['heads']:
@@ -3781,7 +3790,7 @@ class TestScheduler(ZuulTestCase):
job = get_job()
self.assertTrue(job['queued'])
- self.sched.reconfigure(self.config)
+        self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
job = get_job()
@@ -3872,7 +3881,7 @@ class TestScheduler(ZuulTestCase):
self.create_branch('org/project', 'stable')
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', 'layouts/timer-template.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# The pipeline triggers every second, so we should have seen
# several by now.
@@ -3897,7 +3906,7 @@ class TestScheduler(ZuulTestCase):
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
@@ -3922,7 +3931,7 @@ class TestScheduler(ZuulTestCase):
self.create_branch('org/project', 'stable')
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', config_file)
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# The pipeline triggers every second, so we should have seen
# several by now.
@@ -3944,7 +3953,8 @@ class TestScheduler(ZuulTestCase):
# Ensure that the status json has the ref so we can render it in the
# web ui.
- data = json.loads(self.sched.formatStatusJSON('tenant-one'))
+ data = json.loads(self.scheds.first.sched
+ .formatStatusJSON('tenant-one'))
pipeline = [x for x in data['pipelines'] if x['name'] == 'periodic'][0]
first = pipeline['change_queues'][0]['heads'][0][0]
second = pipeline['change_queues'][1]['heads'][0][0]
@@ -3957,7 +3967,7 @@ class TestScheduler(ZuulTestCase):
# Stop queuing timer triggered jobs so that the assertions
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
@@ -3996,7 +4006,7 @@ class TestScheduler(ZuulTestCase):
# Start timer trigger
self.commitConfigUpdate('common-config',
'layouts/idle.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# The pipeline triggers every second, so we should have seen
@@ -4007,7 +4017,7 @@ class TestScheduler(ZuulTestCase):
# below don't race against more jobs being queued.
self.commitConfigUpdate('common-config',
'layouts/no-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job,
# we can end up with one more event firing, so give it an
@@ -4058,7 +4068,7 @@ class TestScheduler(ZuulTestCase):
# should succeed.
report_mock.side_effect = Exception('Gerrit failed to report')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check = tenant.layout.pipelines['check']
check.success_actions = sorted(check.success_actions,
@@ -4101,7 +4111,7 @@ class TestScheduler(ZuulTestCase):
# the hold flag before the first job.
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', 'layouts/timer-smtp.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# The pipeline triggers every second, so we should have seen
# several by now.
@@ -4134,7 +4144,7 @@ class TestScheduler(ZuulTestCase):
# Stop queuing timer triggered jobs and let any that may have
# queued through so that end of test assertions pass.
self.commitConfigUpdate('common-config', 'layouts/no-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
# can end up with one more event firing, so give it an extra
@@ -4150,7 +4160,7 @@ class TestScheduler(ZuulTestCase):
self.worker.hold_jobs_in_build = True
self.config.set('zuul', 'layout_config',
'tests/fixtures/layout-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.registerJobs()
# The pipeline triggers every second, so we should have seen
@@ -4182,7 +4192,7 @@ class TestScheduler(ZuulTestCase):
# queued through so that end of test assertions pass.
self.config.set('zuul', 'layout_config',
'tests/fixtures/layout-no-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.registerJobs()
self.waitUntilSettled()
# If APScheduler is in mid-event when we remove the job, we
@@ -4388,7 +4398,7 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
gate_pipeline = tenant.layout.pipelines['gate']
self.assertEqual(gate_pipeline.getAllItems(), [])
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 1)
@@ -4426,7 +4436,7 @@ class TestScheduler(ZuulTestCase):
ref=None)
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
self.assertEqual(len(check_pipeline.getAllItems()), 2)
self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 1)
@@ -4491,7 +4501,7 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', 'layouts/timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# We expect that one build for each branch (master and stable) appears.
for _ in iterate_timeout(30, 'Wait for two builds that are hold'):
@@ -4509,7 +4519,7 @@ class TestScheduler(ZuulTestCase):
self.commitConfigUpdate('common-config',
'layouts/no-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
@@ -4627,7 +4637,7 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
items = tenant.layout.pipelines['gate'].getAllItems()
enqueue_times = {}
for item in items:
@@ -4825,7 +4835,7 @@ class TestScheduler(ZuulTestCase):
self.executor_server.release('project-.*')
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# A failed so window is reduced by 1 to 1.
self.assertEqual(queue.window, 1)
@@ -4913,7 +4923,7 @@ class TestScheduler(ZuulTestCase):
self.executor_server.release('project-.*')
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# A failed so window is reduced by 1 to 1.
self.assertEqual(queue.window, 1)
@@ -5035,7 +5045,7 @@ class TestScheduler(ZuulTestCase):
# D's remaining job
self.assertEqual(self.builds[2].name, 'project-test2')
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 1)
@@ -5044,7 +5054,7 @@ class TestScheduler(ZuulTestCase):
self.commitConfigUpdate('org/common-config',
'layouts/rate-limit-reconfigure2.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# D's remaining job should still be queued
@@ -5066,7 +5076,7 @@ class TestScheduler(ZuulTestCase):
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 20)
self.assertTrue(len(self.builds), 4)
@@ -5075,16 +5085,16 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
self.commitConfigUpdate('org/common-config',
'layouts/reconfigure-window2.yaml')
- self.sched.reconfigure(self.config)
- tenant = self.sched.abide.tenants.get('tenant-one')
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# Even though we have configured a smaller window, the value
# on the existing shared queue should be used.
self.assertEqual(queue.window, 20)
self.assertTrue(len(self.builds), 4)
- self.sched.reconfigure(self.config)
- tenant = self.sched.abide.tenants.get('tenant-one')
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 20)
self.assertTrue(len(self.builds), 4)
@@ -5114,7 +5124,7 @@ class TestScheduler(ZuulTestCase):
self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 2)
self.assertEqual(len(self.builds), 4)
@@ -5122,9 +5132,9 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
self.commitConfigUpdate('org/common-config',
'layouts/reconfigure-window-fixed2.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# Because we have configured a static window, it should
# be allowed to shrink on reconfiguration.
@@ -5133,8 +5143,8 @@ class TestScheduler(ZuulTestCase):
# next pass through the queue processor.
self.assertEqual(len(self.builds), 4)
- self.sched.reconfigure(self.config)
- tenant = self.sched.abide.tenants.get('tenant-one')
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 1)
self.waitUntilSettled()
@@ -5173,7 +5183,7 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
self.log.debug("B complete")
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 2)
self.assertEqual(len(self.builds), 2)
@@ -5181,11 +5191,11 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
self.commitConfigUpdate('org/common-config',
'layouts/reconfigure-window-fixed2.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.log.debug("Reconfiguration complete")
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
# Because we have configured a static window, it should
# be allowed to shrink on reconfiguration.
@@ -5196,7 +5206,7 @@ class TestScheduler(ZuulTestCase):
# run and marked B inactive; run another reconfiguration so
# that we're testing what happens when we reconfigure after
# the active window having shrunk.
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Unpause the node requests now
self.fake_nodepool.unpause()
@@ -5209,7 +5219,7 @@ class TestScheduler(ZuulTestCase):
self.waitUntilSettled()
self.log.debug("Executor unpause complete")
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
queue = tenant.layout.pipelines['gate'].queues[0]
self.assertEqual(queue.window, 1)
@@ -5237,13 +5247,13 @@ class TestScheduler(ZuulTestCase):
# Remove job2
self.commitConfigUpdate('org/common-config',
'layouts/reconfigure-remove-add2.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.assertTrue(len(self.builds), 1)
# Add job2 back
self.commitConfigUpdate('org/common-config',
'layouts/reconfigure-remove-add.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.assertTrue(len(self.builds), 2)
self.executor_server.hold_jobs_in_build = False
@@ -5265,17 +5275,17 @@ class TestScheduler(ZuulTestCase):
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
- self.assertEqual(len(self.executor_client.builds), 1)
+ self.assertEqual(len(self.scheds.first.sched.executor.builds), 1)
self.log.debug('Current builds:')
- self.log.debug(self.executor_client.builds)
+ self.log.debug(self.scheds.first.sched.executor.builds)
start = time.time()
while True:
if time.time() - start > 10:
raise Exception("Timeout waiting for gearman server to report "
+ "back to the client")
- build = list(self.executor_client.builds.values())[0]
+ build = list(self.scheds.first.sched.executor.builds.values())[0]
if build.worker.name == self.executor_server.hostname:
break
else:
@@ -5342,7 +5352,7 @@ For CI problems and help debugging, contact ci@example.org"""
def test_merge_failure_reporters(self):
"""Check that the config is set up correctly"""
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(
"Merge Failed.\n\nThis change or one of its cross-repo "
"dependencies was unable to be automatically merged with the "
@@ -5386,7 +5396,7 @@ For CI problems and help debugging, contact ci@example.org"""
to the correct reporter"""
self.updateConfigLayout(
'tests/fixtures/layout-merge-failure.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.registerJobs()
# Check a test failure isn't reported to SMTP
@@ -5460,7 +5470,7 @@ For CI problems and help debugging, contact ci@example.org"""
if time.time() - start > 10:
raise Exception("Timeout waiting for gearman server to report "
+ "back to the client")
- build = list(self.executor_client.builds.values())[0]
+ build = list(self.scheds.first.sched.executor.builds.values())[0]
if build.worker.name == self.executor_server.hostname:
break
else:
@@ -5527,7 +5537,7 @@ For CI problems and help debugging, contact ci@example.org"""
def test_disable_at(self):
"Test a pipeline will only report to the disabled trigger when failing"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(3, tenant.layout.pipelines['check'].disable_at)
self.assertEqual(
0, tenant.layout.pipelines['check']._consecutive_failures)
@@ -5623,9 +5633,9 @@ For CI problems and help debugging, contact ci@example.org"""
# Now reload the configuration (simulate a HUP) to check the pipeline
# comes out of disabled
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(3, tenant.layout.pipelines['check'].disable_at)
self.assertEqual(
@@ -5750,8 +5760,8 @@ For CI problems and help debugging, contact ci@example.org"""
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
- self.zk.client.stop()
- self.zk.client.start()
+ self.scheds.execute(lambda app: app.sched.zk.client.stop())
+ self.scheds.execute(lambda app: app.sched.zk.client.start())
self.fake_nodepool.unpause()
self.waitUntilSettled()
@@ -5772,11 +5782,11 @@ For CI problems and help debugging, contact ci@example.org"""
# We're waiting on the nodepool request to complete. Stop the
# scheduler from processing further events, then fulfill the
# nodepool request.
- self.sched.run_handler_lock.acquire()
+ self.scheds.first.sched.run_handler_lock.acquire()
# Fulfill the nodepool request.
self.fake_nodepool.unpause()
- requests = list(self.sched.nodepool.requests.values())
+ requests = list(self.scheds.first.sched.nodepool.requests.values())
self.assertEqual(1, len(requests))
request = requests[0]
for x in iterate_timeout(30, 'fulfill request'):
@@ -5786,12 +5796,12 @@ For CI problems and help debugging, contact ci@example.org"""
# The request is fulfilled, but the scheduler hasn't processed
# it yet. Reconnect ZK.
- self.zk.client.stop()
- self.zk.client.start()
+ self.scheds.execute(lambda app: app.sched.zk.client.stop())
+ self.scheds.execute(lambda app: app.sched.zk.client.start())
# Allow the scheduler to continue and process the (now
# out-of-date) notification that nodes are ready.
- self.sched.run_handler_lock.release()
+ self.scheds.first.sched.run_handler_lock.release()
# It should resubmit the request, once it's fulfilled, we can
# wait for it to run jobs and settle.
@@ -5980,7 +5990,7 @@ For CI problems and help debugging, contact ci@example.org"""
# Fulfill only the first request
self.fake_nodepool.fulfillRequest(reqs[0])
for x in iterate_timeout(30, 'fulfill request'):
- if len(self.sched.nodepool.requests) < 4:
+ if len(self.scheds.first.sched.nodepool.requests) < 4:
break
self.waitUntilSettled()
@@ -6056,7 +6066,7 @@ For CI problems and help debugging, contact ci@example.org"""
self.waitUntilSettled()
self.commitConfigUpdate('common-config', 'layouts/no-jobs.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.fake_nodepool.unpause()
@@ -6091,13 +6101,13 @@ For CI problems and help debugging, contact ci@example.org"""
self.fake_gerrit.addEvent(A.getRefUpdatedEvent())
self.waitUntilSettled()
# Reconfigure while we still have an outstanding merge job
- self.sched.reconfigureTenant(self.sched.abide.tenants['tenant-one'],
- None, None)
+ self.scheds.first.sched.reconfigureTenant(
+ self.scheds.first.sched.abide.tenants['tenant-one'], None, None)
self.waitUntilSettled()
# Verify the merge job is still running and that the item is
# in the pipeline
- self.assertEqual(len(self.sched.merger.jobs), 1)
- tenant = self.sched.abide.tenants.get('tenant-one')
+ self.assertEqual(len(self.scheds.first.sched.merger.jobs), 1)
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
pipeline = tenant.layout.pipelines['post']
self.assertEqual(len(pipeline.getAllItems()), 1)
self.gearman_server.hold_merge_jobs_in_queue = False
@@ -6822,7 +6832,7 @@ class TestSchedulerSuccessURL(ZuulTestCase):
def test_success_url(self):
"Ensure bad build params are ignored"
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.init_repo('org/docs')
A = self.fake_gerrit.addFakeChange('org/docs', 'master', 'A')
@@ -7052,7 +7062,7 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_one(self):
"Test semaphores with max=1 (mutex)"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.executor_server.hold_jobs_in_build = True
@@ -7133,7 +7143,7 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_two(self):
"Test semaphores with max>1"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
@@ -7213,7 +7223,7 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_node_failure(self):
"Test semaphore and node failure"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# Pause nodepool so we can fail the node request later
self.fake_nodepool.pause()
@@ -7245,7 +7255,7 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_resources_first(self):
"Test semaphores with max=1 (mutex) and get resources first"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.executor_server.hold_jobs_in_build = True
@@ -7292,7 +7302,7 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_resources_first_node_failure(self):
"Test semaphore and node failure"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# Pause nodepool so we can fail the node request later
self.fake_nodepool.pause()
@@ -7324,21 +7334,21 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_zk_error(self):
"Test semaphore release with zk error"
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
A = self.fake_gerrit.addFakeChange('org/project2', 'master', 'A')
self.assertFalse('test-semaphore' in
tenant.semaphore_handler.semaphores)
# Simulate a single zk error in useNodeSet
- orig_useNodeSet = self.nodepool.useNodeSet
+ orig_useNodeSet = self.scheds.first.sched.nodepool.useNodeSet
def broken_use_nodeset(nodeset, build_set=None, event=None):
# restore original useNodeSet
- self.nodepool.useNodeSet = orig_useNodeSet
+ self.scheds.first.sched.nodepool.useNodeSet = orig_useNodeSet
raise NoNodeError()
- self.nodepool.useNodeSet = broken_use_nodeset
+ self.scheds.first.sched.nodepool.useNodeSet = broken_use_nodeset
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
@@ -7354,7 +7364,7 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_abandon(self):
"Test abandon with job semaphores"
self.executor_server.hold_jobs_in_build = True
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -7390,7 +7400,7 @@ class TestSemaphore(ZuulTestCase):
# and aquiring the semaphore.
self.fake_nodepool.paused = True
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -7432,7 +7442,7 @@ class TestSemaphore(ZuulTestCase):
# in a controlled manner.
self.fake_nodepool.paused = True
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -7441,13 +7451,13 @@ class TestSemaphore(ZuulTestCase):
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
- self.assertEqual(len(self.nodepool.requests), 2)
+ self.assertEqual(len(self.scheds.first.sched.nodepool.requests), 2)
# Now unpause nodepool to fulfill the node requests. We cannot use
# waitUntilSettled here because the executor is paused.
self.fake_nodepool.paused = False
for _ in iterate_timeout(30, 'fulfill node requests'):
- if len(self.nodepool.requests) == 0:
+ if len(self.scheds.first.sched.nodepool.requests) == 0:
break
self.assertTrue('test-semaphore' in
@@ -7470,7 +7480,7 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_new_patchset(self):
"Test new patchset with job semaphores"
self.executor_server.hold_jobs_in_build = True
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
@@ -7510,7 +7520,7 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_reconfigure(self):
"Test reconfigure with job semaphores"
self.executor_server.hold_jobs_in_build = True
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertFalse('test-semaphore' in
tenant.semaphore_handler.semaphores)
@@ -7522,9 +7532,9 @@ class TestSemaphore(ZuulTestCase):
tenant.semaphore_handler.semaphores)
# reconfigure without layout change
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# semaphore still must be held
self.assertTrue('test-semaphore' in
@@ -7533,9 +7543,9 @@ class TestSemaphore(ZuulTestCase):
self.commitConfigUpdate(
'common-config',
'config/semaphore/zuul-reconfiguration.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.executor_server.release('project-test1')
self.waitUntilSettled()
@@ -7550,7 +7560,7 @@ class TestSemaphore(ZuulTestCase):
def test_semaphore_reconfigure_job_removal(self):
"Test job removal during reconfiguration with semaphores"
self.executor_server.hold_jobs_in_build = True
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertFalse('test-semaphore' in
@@ -7565,7 +7575,7 @@ class TestSemaphore(ZuulTestCase):
self.commitConfigUpdate(
'common-config',
'config/semaphore/git/common-config/zuul-remove-job.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Release job project-test1 which should be the only job left
@@ -7573,7 +7583,7 @@ class TestSemaphore(ZuulTestCase):
self.waitUntilSettled()
# The check pipeline should be empty
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 0)
@@ -7597,7 +7607,7 @@ class TestSemaphore(ZuulTestCase):
# reconfiguration.
self.fake_nodepool.pause()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
self.assertFalse('test-semaphore' in
@@ -7612,7 +7622,7 @@ class TestSemaphore(ZuulTestCase):
self.commitConfigUpdate(
'common-config',
'config/semaphore/git/common-config/zuul-remove-job.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
# Now we can unpause nodepool
@@ -7624,7 +7634,7 @@ class TestSemaphore(ZuulTestCase):
self.waitUntilSettled()
# The check pipeline should be empty
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
items = check_pipeline.getAllItems()
self.assertEqual(len(items), 0)
@@ -7645,8 +7655,8 @@ class TestSemaphoreMultiTenant(ZuulTestCase):
"Test semaphores in multiple tenants"
self.waitUntilSettled()
- tenant_one = self.sched.abide.tenants.get('tenant-one')
- tenant_two = self.sched.abide.tenants.get('tenant-two')
+ tenant_one = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ tenant_two = self.scheds.first.sched.abide.tenants.get('tenant-two')
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
@@ -7837,7 +7847,7 @@ class TestSemaphoreInRepo(ZuulTestCase):
# after the change lands.
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
in_repo_conf = textwrap.dedent(
"""
@@ -7920,7 +7930,7 @@ class TestSemaphoreInRepo(ZuulTestCase):
self.waitUntilSettled()
# now that change A was merged, the new semaphore max must be effective
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEqual(tenant.layout.semaphores.get('test-semaphore').max, 2)
# two builds must be in queue, two semaphores acquired
@@ -8095,7 +8105,7 @@ class TestSchedulerFailFast(ZuulTestCase):
# Commit new config that removes project-test1
self.commitConfigUpdate('common-config',
'layouts/fail-fast-reconfigure.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Release project-test1
self.executor_server.release('project-test1')
@@ -8259,13 +8269,16 @@ class TestSchedulerSmartReconfiguration(ZuulTestCase):
self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
# record previous tenant reconfiguration time, which may not be set
- old_one = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
- old_two = self.sched.tenant_last_reconfigured.get('tenant-two', 0)
+ old_one = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
+ old_two = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-two', 0)
self.waitUntilSettled()
self.newTenantConfig('config/multi-tenant/main-reconfig.yaml')
- self.sched_app.smartReconfigure(command_socket=command_socket)
+ self.scheds.execute(
+ lambda app: app.smartReconfigure(command_socket=command_socket))
# Wait for smart reconfiguration. Only tenant-two should be
# reconfigured. Note that waitUntilSettled is not
@@ -8275,7 +8288,8 @@ class TestSchedulerSmartReconfiguration(ZuulTestCase):
while True:
if time.time() - start > 15:
raise Exception("Timeout waiting for smart reconfiguration")
- new_two = self.sched.tenant_last_reconfigured.get('tenant-two', 0)
+ new_two = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-two', 0)
if old_two < new_two:
break
else:
@@ -8283,7 +8297,8 @@ class TestSchedulerSmartReconfiguration(ZuulTestCase):
# Ensure that tenant-one has not been reconfigured
self.waitUntilSettled()
- new_one = self.sched.tenant_last_reconfigured.get('tenant-one', 0)
+ new_one = self.scheds.first.sched.tenant_last_reconfigured\
+ .get('tenant-one', 0)
self.assertEqual(old_one, new_one)
self.executor_server.hold_jobs_in_build = False
@@ -8299,11 +8314,12 @@ class TestSchedulerSmartReconfiguration(ZuulTestCase):
# Verify known tenants
expected_tenants = {'tenant-one', 'tenant-two', 'tenant-four'}
- self.assertEqual(expected_tenants, self.sched.abide.tenants.keys())
+ self.assertEqual(expected_tenants,
+ self.scheds.first.sched.abide.tenants.keys())
- self.assertIsNotNone(
- self.sched.tenant_last_reconfigured.get('tenant-four'),
- 'Tenant tenant-four should exist now.')
+ self.assertIsNotNone(self.scheds.first.sched.tenant_last_reconfigured
+ .get('tenant-four'),
+ 'Tenant tenant-four should exist now.')
# Test that the new tenant-four actually works
D = self.fake_gerrit.addFakeChange('org/project4', 'master', 'D')
diff --git a/tests/unit/test_serial.py b/tests/unit/test_serial.py
new file mode 100644
index 000000000..f91bb4eda
--- /dev/null
+++ b/tests/unit/test_serial.py
@@ -0,0 +1,123 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tests.base import (
+ ZuulTestCase,
+ simple_layout,
+)
+
+
+class TestSerial(ZuulTestCase):
+ tenant_config_file = 'config/single-tenant/main.yaml'
+
+ @simple_layout('layouts/serial.yaml')
+ def test_deploy_window(self):
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ A.setMerged()
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ B.setMerged()
+ self.fake_gerrit.addEvent(B.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 2)
+ self.assertTrue(self.builds[0].hasChanges(A))
+ self.assertTrue(self.builds[1].hasChanges(A))
+ self.assertFalse(self.builds[0].hasChanges(B))
+ self.assertFalse(self.builds[1].hasChanges(B))
+
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 2)
+ self.assertTrue(self.builds[0].hasChanges(A))
+ self.assertTrue(self.builds[1].hasChanges(A))
+ self.assertTrue(self.builds[0].hasChanges(B))
+ self.assertTrue(self.builds[1].hasChanges(B))
+
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertEqual(A.reported, 1)
+ self.assertEqual(B.reported, 1)
+ self.assertHistory([
+ dict(name='job1', result='SUCCESS', changes='1,1'),
+ dict(name='job2', result='SUCCESS', changes='1,1'),
+ dict(name='job1', result='SUCCESS', changes='2,1'),
+ dict(name='job2', result='SUCCESS', changes='2,1'),
+ ], ordered=False)
+
+ @simple_layout('layouts/serial.yaml')
+ def test_deploy_shared(self):
+ # Same as test_deploy_window but with two separate projects
+ # sharing a queue.
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ A.setMerged()
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B')
+ B.setMerged()
+ self.fake_gerrit.addEvent(B.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 1)
+ self.assertTrue(self.builds[0].hasChanges(A))
+
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 1)
+ self.assertTrue(self.builds[0].hasChanges(B))
+
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertEqual(A.reported, 1)
+ self.assertEqual(B.reported, 1)
+ self.assertHistory([
+ dict(name='job1', result='SUCCESS', changes='1,1'),
+ dict(name='job1', result='SUCCESS', changes='2,1'),
+ ], ordered=False)
+
+ @simple_layout('layouts/serial.yaml')
+ def test_deploy_unshared(self):
+ # Test two projects which don't share a queue.
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ A.setMerged()
+ self.fake_gerrit.addEvent(A.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+ B.setMerged()
+ self.fake_gerrit.addEvent(B.getChangeMergedEvent())
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 3)
+ self.assertTrue(self.builds[0].hasChanges(A))
+ self.assertTrue(self.builds[1].hasChanges(A))
+ self.assertTrue(self.builds[2].hasChanges(B))
+ self.assertFalse(self.builds[2].hasChanges(A))
+
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertEqual(A.reported, 1)
+ self.assertEqual(B.reported, 1)
+ self.assertHistory([
+ dict(name='job1', result='SUCCESS', changes='1,1'),
+ dict(name='job2', result='SUCCESS', changes='1,1'),
+ dict(name='job1', result='SUCCESS', changes='2,1'),
+ ], ordered=False)
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 252f103c9..a80675544 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -384,7 +384,7 @@ class TestBranchDeletion(ZuulTestCase):
self.waitUntilSettled()
self.delete_branch('org/project', 'stable/queens')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
in_repo_conf = textwrap.dedent(
@@ -999,7 +999,7 @@ class TestInRepoConfig(ZuulTestCase):
A.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
gc.collect()
@@ -1139,7 +1139,7 @@ class TestInRepoConfig(ZuulTestCase):
files=file_dict)
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEquals(
len(tenant.layout.loading_errors), 0,
"No error should have been accumulated")
@@ -1233,7 +1233,7 @@ class TestInRepoConfig(ZuulTestCase):
def test_dynamic_config_new_patchset(self):
self.executor_server.hold_jobs_in_build = True
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
check_pipeline = tenant.layout.pipelines['check']
in_repo_conf = textwrap.dedent(
@@ -2186,7 +2186,7 @@ class TestInRepoConfig(ZuulTestCase):
self.fake_gerrit.addEvent(B.getChangeMergedEvent())
self.waitUntilSettled()
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# Ensure the latest change is reflected in the config; if it
# isn't this will raise an exception.
tenant.layout.getJob('project-test2')
@@ -2435,7 +2435,8 @@ class TestNonLiveMerges(ZuulTestCase):
# We expect one merge call per live change, plus one call for
# each non-live change with a config update (which is all of them).
- self.assertEqual(len(self.merge_client.history['merger:merge']), 6)
+ self.assertEqual(
+ len(self.scheds.first.sched.merger.history['merger:merge']), 6)
def test_non_live_merges(self):
"""
@@ -2458,7 +2459,8 @@ class TestNonLiveMerges(ZuulTestCase):
self.waitUntilSettled()
# We expect one merge call per live change.
- self.assertEqual(len(self.merge_client.history['merger:merge']), 3)
+ self.assertEqual(
+ len(self.scheds.first.sched.merger.history['merger:merge']), 3)
class TestJobContamination(AnsibleZuulTestCase):
@@ -2536,7 +2538,7 @@ class TestInRepoJoin(ZuulTestCase):
# dependent pipeline for the first time
self.executor_server.hold_jobs_in_build = True
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
gate_pipeline = tenant.layout.pipelines['gate']
self.assertEqual(gate_pipeline.queues, [])
@@ -3083,7 +3085,7 @@ class TestBrokenTrustedConfig(ZuulTestCase):
def test_broken_config_on_startup(self):
# verify get the errors at tenant level.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
loading_errors = tenant.layout.loading_errors
self.assertEquals(
len(tenant.layout.loading_errors), 1,
@@ -3140,7 +3142,7 @@ class TestBrokenConfig(ZuulTestCase):
def test_broken_config_on_startup(self):
# verify get the errors at tenant level.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
loading_errors = tenant.layout.loading_errors
self.assertEquals(
len(tenant.layout.loading_errors), 2,
@@ -3156,7 +3158,7 @@ class TestBrokenConfig(ZuulTestCase):
def test_broken_config_on_startup_template(self):
# Verify that a missing project-template doesn't break gate
# pipeline construction.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEquals(
len(tenant.layout.loading_errors), 1,
"An error should have been stored")
@@ -3167,7 +3169,7 @@ class TestBrokenConfig(ZuulTestCase):
@simple_layout('layouts/broken-double-gate.yaml')
def test_broken_config_on_startup_double_gate(self):
# Verify that duplicated pipeline definitions raise config errors
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
self.assertEquals(
len(tenant.layout.loading_errors), 1,
"An error should have been stored")
@@ -3177,7 +3179,7 @@ class TestBrokenConfig(ZuulTestCase):
def test_dynamic_ignore(self):
# Verify dynamic config behaviors inside a tenant broken config
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# There is a configuration error
self.assertEquals(
len(tenant.layout.loading_errors), 2,
@@ -3209,7 +3211,7 @@ class TestBrokenConfig(ZuulTestCase):
def test_dynamic_fail_unbroken(self):
# Verify dynamic config behaviors inside a tenant broken config
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# There is a configuration error
self.assertEquals(
len(tenant.layout.loading_errors), 2,
@@ -3243,7 +3245,7 @@ class TestBrokenConfig(ZuulTestCase):
def test_dynamic_fail_broken(self):
# Verify dynamic config behaviors inside a tenant broken config
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# There is a configuration error
self.assertEquals(
len(tenant.layout.loading_errors), 2,
@@ -3277,7 +3279,7 @@ class TestBrokenConfig(ZuulTestCase):
def test_dynamic_fix_broken(self):
# Verify dynamic config behaviors inside a tenant broken config
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# There is a configuration error
self.assertEquals(
len(tenant.layout.loading_errors), 2,
@@ -3309,7 +3311,7 @@ class TestBrokenConfig(ZuulTestCase):
def test_dynamic_fail_cross_repo(self):
# Verify dynamic config behaviors inside a tenant broken config
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
# There is a configuration error
self.assertEquals(
len(tenant.layout.loading_errors), 2,
@@ -3356,6 +3358,80 @@ class TestBrokenConfig(ZuulTestCase):
"A should have failed the check pipeline")
+class TestBrokenMultiTenantConfig(ZuulTestCase):
+ # Test we can deal with a broken multi-tenant config
+
+ tenant_config_file = 'config/broken-multi-tenant/main.yaml'
+
+ def test_loading_errors(self):
+ # This regression test came about when we discovered the following:
+
+ # * We cache configuration objects if they load without error
+ # in their first tenant; that means that they can show up as
+ # errors in later tenants, but as long as those other
+ # tenants aren't proposing changes to that repo (which is
+ # unlikely in this situation; this usually arises if the
+ # tenant just wants to use some foreign jobs), users won't
+ # be blocked by the error.
+ #
+ # * If a merge job for a dynamic config change arrives out of
+ # order, we will build the new configuration and if there
+ # are errors, we will compare it to the previous
+ # configuration to determine if they are relevant, but that
+ # caused an error since the previous layout had not been
+ # calculated yet. It's pretty hard to end up with
+ # irrelevant errors except by virtue of the first point
+ # above, which is why this test relies on a second tenant.
+
+ # This test has two tenants. The first loads project2, and
+ # project3 without errors and all config objects are cached.
+ # The second tenant loads only project1 and project2.
+ # Project2 references a job that is defined in project3, so
+ # the tenant loads with an error, but proceeds.
+
+ # Don't run any merge jobs, so we can run them out of order.
+ self.gearman_server.hold_merge_jobs_in_queue = True
+
+ # Create a first change which modifies the config (and
+ # therefore will require a merge job).
+ in_repo_conf = textwrap.dedent(
+ """
+ - job: {'name': 'foo'}
+ """)
+ file_dict = {'.zuul.yaml': in_repo_conf}
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A',
+ files=file_dict)
+
+ # Create a second change which also modifies the config.
+ B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B',
+ files=file_dict)
+ B.setDependsOn(A, 1)
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ # There should be a merge job for each change.
+ self.assertEqual(len(self.scheds.first.sched.merger.jobs), 2)
+
+ jobs = [job for job in self.gearman_server.getQueue()
+ if job.name.startswith(b'merger:')]
+ # Release the second merge job.
+ jobs[-1].waiting = False
+ self.gearman_server.wakeConnections()
+ self.waitUntilSettled()
+
+ # At this point we should still be waiting on the first
+ # change's merge job.
+ self.assertHistory([])
+
+ # Proceed.
+ self.gearman_server.hold_merge_jobs_in_queue = False
+ self.gearman_server.release()
+ self.waitUntilSettled()
+ self.assertHistory([
+ dict(name='base', result='SUCCESS', changes='1,1 2,1'),
+ ])
+
+
class TestProjectKeys(ZuulTestCase):
# Test that we can generate project keys
@@ -3509,7 +3585,7 @@ class TestRoleBranches(RoleTestCase):
'parent-job-pre', 'parent-master-role')
self._addRole('project1', 'master', 'master-role', parent=p)
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
# Push a change to project2 which will run 3 jobs which
# inherit from project1.
self.executor_server.hold_jobs_in_build = True
@@ -3735,7 +3811,7 @@ class TestDataReturn(AnsibleZuulTestCase):
self.waitUntilSettled()
# Make sure skipped jobs are not reported as failing
- tenant = self.sched.abide.tenants.get("tenant-one")
+ tenant = self.scheds.first.sched.abide.tenants.get("tenant-one")
status = tenant.layout.pipelines["check"].formatStatusJSON()
self.assertEqual(
status["change_queues"][0]["heads"][0][0]["failing_reasons"], [])
@@ -4042,7 +4118,7 @@ class TestPragma(ZuulTestCase):
# This is an untrusted repo with 2 branches, so it should have
# an implied branch matcher for the job.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = tenant.layout.getJobs('test-job')
self.assertEqual(len(jobs), 1)
for job in tenant.layout.getJobs('test-job'):
@@ -4070,7 +4146,7 @@ class TestPragma(ZuulTestCase):
# This is an untrusted repo with 2 branches, so it would
# normally have an implied branch matcher, but our pragma
# overrides it.
- tenant = self.sched.abide.tenants.get('tenant-one')
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
jobs = tenant.layout.getJobs('test-job')
self.assertEqual(len(jobs), 1)
for job in tenant.layout.getJobs('test-job'):
diff --git a/tests/unit/test_web.py b/tests/unit/test_web.py
index 200a50e39..af968b1bc 100644
--- a/tests/unit/test_web.py
+++ b/tests/unit/test_web.py
@@ -969,7 +969,9 @@ class TestWebSecrets(BaseTestWeb):
self.assertEqual([secret], run[0]['secrets'])
-class TestInfo(BaseTestWeb):
+class TestInfo(ZuulDBTestCase, BaseTestWeb):
+
+ config_file = 'zuul-sql-driver.conf'
def setUp(self):
super(TestInfo, self).setUp()
@@ -979,40 +981,70 @@ class TestInfo(BaseTestWeb):
statsd_config = self.config_ini_data.get('statsd', {})
self.stats_prefix = statsd_config.get('prefix')
+ def _expected_info(self):
+ return {
+ "info": {
+ "capabilities": {
+ "job_history": True,
+ "auth": {
+ "realms": {},
+ "default_realm": None
+ }
+ },
+ "stats": {
+ "url": self.stats_url,
+ "prefix": self.stats_prefix,
+ "type": "graphite",
+ },
+ "websocket_url": self.websocket_url,
+ }
+ }
+
def test_info(self):
info = self.get_url("api/info").json()
self.assertEqual(
- info, {
- "info": {
- "capabilities": {
- "job_history": False
- },
- "stats": {
- "url": self.stats_url,
- "prefix": self.stats_prefix,
- "type": "graphite",
- },
- "websocket_url": self.websocket_url,
- }
- })
+ info, self._expected_info())
def test_tenant_info(self):
info = self.get_url("api/tenant/tenant-one/info").json()
+ expected_info = self._expected_info()
+ expected_info['info']['tenant'] = 'tenant-one'
self.assertEqual(
- info, {
- "info": {
- "tenant": "tenant-one",
- "capabilities": {
- "job_history": False
- },
- "stats": {
- "url": self.stats_url,
- "prefix": self.stats_prefix,
- "type": "graphite",
- },
- "websocket_url": self.websocket_url,
+ info, expected_info)
+
+
+class TestWebCapabilitiesInfo(TestInfo):
+
+ config_file = 'zuul-admin-web-oidc.conf'
+
+ def _expected_info(self):
+ info = super(TestWebCapabilitiesInfo, self)._expected_info()
+ info['info']['capabilities']['auth'] = {
+ 'realms': {
+ 'myOIDC1': {
+ 'authority': 'http://oidc1',
+ 'client_id': 'zuul',
+ 'type': 'JWT',
+ 'scope': 'openid profile',
+ 'driver': 'OpenIDConnect',
+ },
+ 'myOIDC2': {
+ 'authority': 'http://oidc2',
+ 'client_id': 'zuul',
+ 'type': 'JWT',
+ 'scope': 'openid profile email special-scope',
+ 'driver': 'OpenIDConnect',
+ },
+ 'zuul.example.com': {
+ 'authority': 'zuul_operator',
+ 'client_id': 'zuul.example.com',
+ 'type': 'JWT',
+ 'driver': 'HS256',
}
- })
+ },
+ 'default_realm': 'myOIDC1'
+ }
+ return info
class TestTenantInfoConfigBroken(BaseTestWeb):
@@ -1515,7 +1547,7 @@ class TestTenantScopedWebApi(BaseTestWeb):
self.create_branch('org/project', 'stable')
self.executor_server.hold_jobs_in_build = True
self.commitConfigUpdate('common-config', 'layouts/timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
for _ in iterate_timeout(30, 'Wait for a build on hold'):
@@ -1548,7 +1580,7 @@ class TestTenantScopedWebApi(BaseTestWeb):
self.commitConfigUpdate('common-config',
'layouts/no-timer.yaml')
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
self.waitUntilSettled()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
diff --git a/tests/unit/test_zuultrigger.py b/tests/unit/test_zuultrigger.py
index 0de479928..aeda12433 100644
--- a/tests/unit/test_zuultrigger.py
+++ b/tests/unit/test_zuultrigger.py
@@ -69,11 +69,12 @@ class TestZuulTriggerParentChangeEnqueued(ZuulTestCase):
nonlocal zuultrigger_event_count
if isinstance(args[0], ZuulTriggerEvent):
zuultrigger_event_count += 1
- self.sched.trigger_event_queue.put_orig(*args, **kwargs)
+ self.scheds.first.sched.trigger_event_queue\
+ .put_orig(*args, **kwargs)
- self.sched.trigger_event_queue.put_orig = \
- self.sched.trigger_event_queue.put
- self.sched.trigger_event_queue.put = counting_put
+ self.scheds.first.sched.trigger_event_queue.put_orig = \
+ self.scheds.first.sched.trigger_event_queue.put
+ self.scheds.first.sched.trigger_event_queue.put = counting_put
C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
C.addApproval('Verified', -1)
@@ -150,11 +151,12 @@ class TestZuulTriggerParentChangeEnqueuedGithub(ZuulGithubAppTestCase):
nonlocal zuultrigger_event_count
if isinstance(args[0], ZuulTriggerEvent):
zuultrigger_event_count += 1
- self.sched.trigger_event_queue.put_orig(*args, **kwargs)
+ self.scheds.first.sched.trigger_event_queue\
+ .put_orig(*args, **kwargs)
- self.sched.trigger_event_queue.put_orig = \
- self.sched.trigger_event_queue.put
- self.sched.trigger_event_queue.put = counting_put
+ self.scheds.first.sched.trigger_event_queue.put_orig = \
+ self.scheds.first.sched.trigger_event_queue.put
+ self.scheds.first.sched.trigger_event_queue.put = counting_put
C = self.fake_github.openFakePullRequest('org/project', 'master', 'C')
C.addLabel('for-check') # should go to check
@@ -218,11 +220,17 @@ class TestZuulTriggerProjectChangeMerged(ZuulTestCase):
self.assertTrue("project:{org/project} status:open" in
self.fake_gerrit.queries)
+ # Ensure the gerrit driver has updated its cache after the
+ # previous comments were left:
+ self.fake_gerrit.addEvent(A.getChangeCommentEvent(2))
+ self.fake_gerrit.addEvent(B.getChangeCommentEvent(2))
+ self.waitUntilSettled()
+
# Reconfigure and run the test again. This is a regression
# check to make sure that we don't end up with a stale trigger
# cache that has references to projects from the old
# configuration.
- self.sched.reconfigure(self.config)
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
D.addApproval('Code-Review', 2)
self.fake_gerrit.addEvent(D.addApproval('Approved', 1))
diff --git a/tools/2472D6D0.asc b/tools/2472D6D0.asc
new file mode 100644
index 000000000..42f48ae4d
--- /dev/null
+++ b/tools/2472D6D0.asc
@@ -0,0 +1,21 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v2.0.15 (GNU/Linux)
+
+mQENBFtkV0cBCADStSTCG5qgYtzmWfymHZqxxhfwfS6fdHJcbGUeXsI5dxjeCWhs
+XarZm6rWZOd5WfSmpXhbKOyM6Ll+6bpSl5ICHLa6fcpizYWEPa8fpg9EGl0cF12G
+GgVLnnOZ6NIbsoW0LHt2YN0jn8xKVwyPp7KLHB2paZh+KuURERG406GXY/DgCxUx
+Ffgdelym/gfmt3DSq6GAQRRGHyucMvPYm53r+jVcKsf2Bp6E1XAfqBrD5r0maaCU
+Wvd7bi0B2Q0hIX0rfDCBpl4rFqvyaMPgn+Bkl6IW37zCkWIXqf1E5eDm/XzP881s
++yAvi+JfDwt7AE+Hd2dSf273o3WUdYJGRwyZABEBAAG0OGRldmVsOmt1YmljIE9C
+UyBQcm9qZWN0IDxkZXZlbDprdWJpY0BidWlsZC5vcGVuc3VzZS5vcmc+iQE+BBMB
+CAAoBQJbZFdHAhsDBQkEHrAABgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRBN
+ZDkDdQYKpL0BCACEuegX4bxPkdnML70BbvBmDazyuWYZtSAOLoTosVwXBlTeAIoY
+xAw2Sdc895808blU964fuu8IDcR5KuvkTGSGIDBJ6fHZSFD8r68WSS0k1gOY7bUW
+8kkeThlvuMATgULJIYGQ3vYrEhn5e+Pe8fb3AJmQpQWzcjtPX+pk66sVUT6iNaSt
+QtBVGNTQvVDHekF1j/mYmVbIjIWL6whYkdtlu7KYzCx6KhY9u7i3THxY5ICsq64b
+rYcy5FHaVv5SHfTvtu1WtiSlACVfOgPGntpI+Xi4P2F4n4c5I5VKBxpHteW4LGz8
+YOISzS7ZZlbZ7qus353OU0v6UbA660ju5/gGiEYEExECAAYFAltkV0cACgkQOzAR
+t2udZSOoswCdF44NTN09DwhPFbNYhEMb9juP5ykAn0bcELvuKmgDwEwZMrPQkG8t
+Pu9n
+=CNmf
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/tools/encrypt_secret.py b/tools/encrypt_secret.py
index e2ae048cd..32468fd2a 100755
--- a/tools/encrypt_secret.py
+++ b/tools/encrypt_secret.py
@@ -56,8 +56,11 @@ def main():
default=None,
help="The name of the Zuul tenant. This may be "
"required in a multi-tenant environment.")
- parser.add_argument('--strip', action='store_true', default=False,
- help="Strip whitespace from beginning/end of input.")
+ parser.add_argument('--strip', default=None,
+ help='Unused, kept for backward compatibility.')
+ parser.add_argument('--no-strip', action='store_true', default=False,
+ help="Do not strip whitespace from beginning or "
+ "end of input.")
parser.add_argument('--infile',
default=None,
help="A filename whose contents will be encrypted. "
@@ -119,7 +122,7 @@ def main():
plaintext = sys.stdin.read()
plaintext = plaintext.encode("utf-8")
- if args.strip:
+ if not args.no_strip:
plaintext = plaintext.strip()
pubkey_file = tempfile.NamedTemporaryFile(delete=False)
diff --git a/tools/openssl.cnf b/tools/openssl.cnf
new file mode 100644
index 000000000..7d1a8bb6e
--- /dev/null
+++ b/tools/openssl.cnf
@@ -0,0 +1,352 @@
+#
+# OpenSSL example configuration file.
+# This is mostly being used for generation of certificate requests.
+#
+
+# Note that you can include other files from the main configuration
+# file using the .include directive.
+#.include filename
+
+# This definition stops the following lines choking if HOME isn't
+# defined.
+HOME = .
+RANDFILE = $ENV::HOME/.rnd
+
+# Extra OBJECT IDENTIFIER info:
+#oid_file = $ENV::HOME/.oid
+oid_section = new_oids
+
+# To use this configuration file with the "-extfile" option of the
+# "openssl x509" utility, name here the section containing the
+# X.509v3 extensions to use:
+# extensions =
+# (Alternatively, use a configuration file that has only
+# X.509v3 extensions in its main [= default] section.)
+
+[ new_oids ]
+
+# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.
+# Add a simple OID like this:
+# testoid1=1.2.3.4
+# Or use config file substitution like this:
+# testoid2=${testoid1}.5.6
+
+# Policies used by the TSA examples.
+tsa_policy1 = 1.2.3.4.1
+tsa_policy2 = 1.2.3.4.5.6
+tsa_policy3 = 1.2.3.4.5.7
+
+####################################################################
+[ ca ]
+default_ca = CA_default # The default ca section
+
+####################################################################
+[ CA_default ]
+
+dir = ./demoCA # Where everything is kept
+certs = $dir/certs # Where the issued certs are kept
+crl_dir = $dir/crl # Where the issued crl are kept
+database = $dir/index.txt # database index file.
+#unique_subject = no # Set to 'no' to allow creation of
+ # several certs with same subject.
+new_certs_dir = $dir/newcerts # default place for new certs.
+
+certificate = $dir/cacert.pem # The CA certificate
+serial = $dir/serial # The current serial number
+crlnumber = $dir/crlnumber # the current crl number
+ # must be commented out to leave a V1 CRL
+crl = $dir/crl.pem # The current CRL
+private_key = $dir/private/cakey.pem# The private key
+RANDFILE = $dir/private/.rand # private random number file
+
+x509_extensions = usr_cert # The extensions to add to the cert
+
+# Comment out the following two lines for the "traditional"
+# (and highly broken) format.
+name_opt = ca_default # Subject Name options
+cert_opt = ca_default # Certificate field options
+
+# Extension copying option: use with caution.
+# copy_extensions = copy
+
+# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs
+# so this is commented out by default to leave a V1 CRL.
+# crlnumber must also be commented out to leave a V1 CRL.
+# crl_extensions = crl_ext
+
+default_days = 365 # how long to certify for
+default_crl_days= 30 # how long before next CRL
+default_md = default # use public key default MD
+preserve = no # keep passed DN ordering
+
+# A few different ways of specifying how similar the request should look
+# For type CA, the listed attributes must be the same, and the optional
+# and supplied fields are just that :-)
+policy = policy_match
+
+# For the CA policy
+[ policy_match ]
+countryName = match
+stateOrProvinceName = match
+organizationName = match
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+
+# For the 'anything' policy
+# At this point in time, you must list all acceptable 'object'
+# types.
+[ policy_anything ]
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+
+####################################################################
+[ req ]
+default_bits = 2048
+default_keyfile = privkey.pem
+distinguished_name = req_distinguished_name
+attributes = req_attributes
+x509_extensions = v3_ca # The extensions to add to the self signed cert
+
+# Passwords for private keys if not present they will be prompted for
+# input_password = secret
+# output_password = secret
+
+# This sets a mask for permitted string types. There are several options.
+# default: PrintableString, T61String, BMPString.
+# pkix : PrintableString, BMPString (PKIX recommendation before 2004)
+# utf8only: only UTF8Strings (PKIX recommendation after 2004).
+# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).
+# MASK:XXXX a literal mask value.
+# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.
+string_mask = utf8only
+
+# req_extensions = v3_req # The extensions to add to a certificate request
+
+[ req_distinguished_name ]
+countryName = Country Name (2 letter code)
+countryName_default = AU
+countryName_min = 2
+countryName_max = 2
+
+stateOrProvinceName = State or Province Name (full name)
+stateOrProvinceName_default = Some-State
+
+localityName = Locality Name (eg, city)
+
+0.organizationName = Organization Name (eg, company)
+0.organizationName_default = Internet Widgits Pty Ltd
+
+# we can do this but it is not needed normally :-)
+#1.organizationName = Second Organization Name (eg, company)
+#1.organizationName_default = World Wide Web Pty Ltd
+
+organizationalUnitName = Organizational Unit Name (eg, section)
+#organizationalUnitName_default =
+
+commonName = Common Name (e.g. server FQDN or YOUR name)
+commonName_max = 64
+
+emailAddress = Email Address
+emailAddress_max = 64
+
+# SET-ex3 = SET extension number 3
+
+[ req_attributes ]
+challengePassword = A challenge password
+challengePassword_min = 4
+challengePassword_max = 20
+
+unstructuredName = An optional company name
+
+[ usr_cert ]
+
+# These extensions are added when 'ca' signs a request.
+
+# This goes against PKIX guidelines but some CAs do it and some software
+# requires this to avoid interpreting an end user certificate as a CA.
+
+basicConstraints=CA:FALSE
+
+# Here are some examples of the usage of nsCertType. If it is omitted
+# the certificate can be used for anything *except* object signing.
+
+# This is OK for an SSL server.
+# nsCertType = server
+
+# For an object signing certificate this would be used.
+# nsCertType = objsign
+
+# For normal client use this is typical
+# nsCertType = client, email
+
+# and for everything including object signing:
+# nsCertType = client, email, objsign
+
+# This is typical in keyUsage for a client certificate.
+# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+# This will be displayed in Netscape's comment listbox.
+nsComment = "OpenSSL Generated Certificate"
+
+# PKIX recommendations harmless if included in all certificates.
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid,issuer
+
+# This stuff is for subjectAltName and issuerAltname.
+# Import the email address.
+# subjectAltName=email:copy
+# An alternative to produce certificates that aren't
+# deprecated according to PKIX.
+# subjectAltName=email:move
+
+# Copy subject details
+# issuerAltName=issuer:copy
+
+#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
+#nsBaseUrl
+#nsRevocationUrl
+#nsRenewalUrl
+#nsCaPolicyUrl
+#nsSslServerName
+
+# This is required for TSA certificates.
+# extendedKeyUsage = critical,timeStamping
+
+[ v3_req ]
+
+# Extensions to add to a certificate request
+
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+[ v3_ca ]
+
+
+# Extensions for a typical CA
+
+
+# PKIX recommendation.
+
+subjectKeyIdentifier=hash
+
+authorityKeyIdentifier=keyid:always,issuer
+
+basicConstraints = critical,CA:true
+
+# Key usage: this is typical for a CA certificate. However since it will
+# prevent it being used as a test self-signed certificate it is best
+# left out by default.
+# keyUsage = cRLSign, keyCertSign
+
+# Some might want this also
+# nsCertType = sslCA, emailCA
+
+# Include email address in subject alt name: another PKIX recommendation
+# subjectAltName=email:copy
+# Copy issuer details
+# issuerAltName=issuer:copy
+
+# DER hex encoding of an extension: beware experts only!
+# obj=DER:02:03
+# Where 'obj' is a standard or added object
+# You can even override a supported extension:
+# basicConstraints= critical, DER:30:03:01:01:FF
+
+[ crl_ext ]
+
+# CRL extensions.
+# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.
+
+# issuerAltName=issuer:copy
+authorityKeyIdentifier=keyid:always
+
+[ proxy_cert_ext ]
+# These extensions should be added when creating a proxy certificate
+
+# This goes against PKIX guidelines but some CAs do it and some software
+# requires this to avoid interpreting an end user certificate as a CA.
+
+basicConstraints=CA:FALSE
+
+# Here are some examples of the usage of nsCertType. If it is omitted
+# the certificate can be used for anything *except* object signing.
+
+# This is OK for an SSL server.
+# nsCertType = server
+
+# For an object signing certificate this would be used.
+# nsCertType = objsign
+
+# For normal client use this is typical
+# nsCertType = client, email
+
+# and for everything including object signing:
+# nsCertType = client, email, objsign
+
+# This is typical in keyUsage for a client certificate.
+# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+# This will be displayed in Netscape's comment listbox.
+nsComment = "OpenSSL Generated Certificate"
+
+# PKIX recommendations harmless if included in all certificates.
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid,issuer
+
+# This stuff is for subjectAltName and issuerAltname.
+# Import the email address.
+# subjectAltName=email:copy
+# An alternative to produce certificates that aren't
+# deprecated according to PKIX.
+# subjectAltName=email:move
+
+# Copy subject details
+# issuerAltName=issuer:copy
+
+#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
+#nsBaseUrl
+#nsRevocationUrl
+#nsRenewalUrl
+#nsCaPolicyUrl
+#nsSslServerName
+
+# This really needs to be in place for it to be a proxy certificate.
+proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo
+
+####################################################################
+[ tsa ]
+
+default_tsa = tsa_config1 # the default TSA section
+
+[ tsa_config1 ]
+
+# These are used by the TSA reply generation only.
+dir = ./demoCA # TSA root directory
+serial = $dir/tsaserial # The current serial number (mandatory)
+crypto_device = builtin # OpenSSL engine to use for signing
+signer_cert = $dir/tsacert.pem # The TSA signing certificate
+ # (optional)
+certs = $dir/cacert.pem # Certificate chain to include in reply
+ # (optional)
+signer_key = $dir/private/tsakey.pem # The TSA private key (optional)
+signer_digest = sha256 # Signing digest to use. (Optional)
+default_policy = tsa_policy1 # Policy if request did not specify it
+ # (optional)
+other_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)
+digests = sha1, sha256, sha384, sha512 # Acceptable message digests (mandatory)
+accuracy = secs:1, millisecs:500, microsecs:100 # (optional)
+clock_precision_digits = 0 # number of digits after dot. (optional)
+ordering = yes # Is ordering defined for timestamps?
+ # (optional, default: no)
+tsa_name = yes # Must the TSA name be included in the reply?
+ # (optional, default: no)
+ess_cert_id_chain = no # Must the ESS cert id chain be included?
+ # (optional, default: no)
+ess_cert_id_alg = sha1 # algorithm to compute certificate
+ # identifier (optional, default: sha1)
diff --git a/tools/pip.sh b/tools/pip.sh
index 47b5027f7..7db784690 100755
--- a/tools/pip.sh
+++ b/tools/pip.sh
@@ -23,7 +23,15 @@ then
pip install nodeenv
# Initialize nodeenv and tell it to re-use the currently active virtualenv
# TODO(jeblair): remove node version pin. upath 1.0.4 objects to node >9.
- nodeenv --python-virtualenv -n 10.16.0
+ attempts=0
+ until nodeenv --python-virtualenv -n 10.16.0; do
+ ((attempts++))
+    if [[ $attempts -gt 2 ]]
+ then
+ echo "Failed creating nodeenv"
+ exit 1
+ fi
+ done
# Use -g because inside of the virtualenv '-g' means 'install into the'
# virtualenv - as opposed to installing into the local node_modules.
# Avoid writing a package-lock.json file since we don't use it.
@@ -39,6 +47,9 @@ then
fi
pip install $*
+# Fail-fast if pip detects conflicts
+pip check
+
# Check if we're installing zuul. If so install the managed ansible as well.
if echo "$*" | grep -vq requirements.txt; then
zuul-manage-ansible -v
diff --git a/tools/zk-ca.sh b/tools/zk-ca.sh
new file mode 100755
index 000000000..78b4e5265
--- /dev/null
+++ b/tools/zk-ca.sh
@@ -0,0 +1,103 @@
+#!/bin/sh -e
+
+# Copyright 2020 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Manage a CA for Zookeeper
+
+CAROOT=$1
+SERVER=$2
+
+SUBJECT='/C=US/ST=California/L=Oakland/O=Company Name/OU=Org'
+TOOLSDIR=$(dirname $0)
+CONFIG="-config $TOOLSDIR/openssl.cnf"
+
+make_ca() {
+ mkdir $CAROOT/demoCA
+ mkdir $CAROOT/demoCA/reqs
+ mkdir $CAROOT/demoCA/newcerts
+ mkdir $CAROOT/demoCA/crl
+ mkdir $CAROOT/demoCA/private
+ chmod 700 $CAROOT/demoCA/private
+ touch $CAROOT/demoCA/index.txt
+ touch $CAROOT/demoCA/index.txt.attr
+ mkdir $CAROOT/certs
+ mkdir $CAROOT/keys
+ mkdir $CAROOT/keystores
+ chmod 700 $CAROOT/keys
+ chmod 700 $CAROOT/keystores
+
+ openssl req $CONFIG -new -nodes -subj "$SUBJECT/CN=caroot" \
+ -keyout $CAROOT/demoCA/private/cakey.pem \
+ -out $CAROOT/demoCA/reqs/careq.pem
+ openssl ca $CONFIG -create_serial -days 3560 -batch -selfsign -extensions v3_ca \
+ -out $CAROOT/demoCA/cacert.pem \
+ -keyfile $CAROOT/demoCA/private/cakey.pem \
+ -infiles $CAROOT/demoCA/reqs/careq.pem
+ cp $CAROOT/demoCA/cacert.pem $CAROOT/certs
+}
+
+make_client() {
+ openssl req $CONFIG -new -nodes -subj "$SUBJECT/CN=client" \
+ -keyout $CAROOT/keys/clientkey.pem \
+ -out $CAROOT/demoCA/reqs/clientreq.pem
+ openssl ca $CONFIG -batch -policy policy_anything -days 3560 \
+ -out $CAROOT/certs/client.pem \
+ -infiles $CAROOT/demoCA/reqs/clientreq.pem
+}
+
+make_server() {
+ openssl req $CONFIG -new -nodes -subj "$SUBJECT/CN=$SERVER" \
+ -keyout $CAROOT/keys/${SERVER}key.pem \
+ -out $CAROOT/demoCA/reqs/${SERVER}req.pem
+ openssl ca $CONFIG -batch -policy policy_anything -days 3560 \
+ -out $CAROOT/certs/$SERVER.pem \
+ -infiles $CAROOT/demoCA/reqs/${SERVER}req.pem
+ cat $CAROOT/certs/$SERVER.pem $CAROOT/keys/${SERVER}key.pem \
+ > $CAROOT/keystores/$SERVER.pem
+}
+
+help() {
+ echo "$0 CAROOT [SERVER]"
+ echo
+ echo " CAROOT is the path to a directory in which to store the CA"
+ echo " and certificates."
+ echo " SERVER is the FQDN of a server for which a certificate should"
+ echo " be generated"
+}
+
+if [ ! -d "$CAROOT" ]; then
+ echo "CAROOT must be a directory"
+ help
+ exit 1
+fi
+
+cd $CAROOT
+CAROOT=`pwd`
+
+if [ ! -d "$CAROOT/demoCA" ]; then
+ echo 'Generate CA'
+ make_ca
+ echo 'Generate client certificate'
+ make_client
+fi
+
+if [ -f "$CAROOT/certs/$SERVER.pem" ]; then
+ echo "Certificate for $SERVER already exists"
+ exit 0
+fi
+
+if [ "$SERVER" != "" ]; then
+ make_server
+fi
diff --git a/tox.ini b/tox.ini
index 485bf10c9..f7ee8ea2a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
[tox]
minversion = 3.2
skipsdist = True
-envlist = pep8,py3{-docker}
+envlist = linters,py3{-docker}
ignore_basepython_conflict = True
# TODO(stephenfin): It would be good to set 'requires = tox-docker', but doing
# so borks the tools/pip.sh wrapper we're using here and probably isn't what
@@ -17,14 +17,15 @@ setenv =
OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:1}
OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:1}
passenv =
- ZUUL_TEST_ROOT
- OS_STDOUT_CAPTURE
- OS_STDERR_CAPTURE
+ DOCKER_*
+ NODEPOOL_ZK_HOST
OS_LOG_CAPTURE
OS_LOG_DEFAULTS
- NODEPOOL_ZK_HOST
+ OS_STDERR_CAPTURE
+ OS_STDOUT_CAPTURE
ZUUL_MYSQL_HOST
ZUUL_POSTGRES_HOST
+ ZUUL_TEST_ROOT
usedevelop = True
whitelist_externals = bash
deps =
@@ -45,14 +46,15 @@ install_command = pip install {opts} {packages}
deps = bindep
commands = bindep test
-[testenv:pep8]
+[testenv:linters]
+usedevelop = False
install_command = pip install {opts} {packages}
# --ignore-missing-imports tells mypy to not try to follow imported modules
# out of the current tree. As you might expect, we don't want to run static
# type checking on the world - just on ourselves.
deps =
flake8
- mypy<0.650
+ mypy<0.740
openapi-spec-validator
commands =
flake8 {posargs}
@@ -86,14 +88,15 @@ commands =
[testenv:remote]
passenv =
- ZUUL_TEST_ROOT
- OS_STDOUT_CAPTURE
- OS_STDERR_CAPTURE
+ DOCKER_*
+ NODEPOOL_ZK_HOST
OS_LOG_CAPTURE
OS_LOG_DEFAULTS
- NODEPOOL_ZK_HOST
+ OS_STDERR_CAPTURE
+ OS_STDOUT_CAPTURE
ZUUL_REMOTE_IPV4
ZUUL_SSH_KEY
+ ZUUL_TEST_ROOT
commands =
stestr run --test-path ./tests/remote {posargs}
diff --git a/web/src/containers/build/BuildOutput.jsx b/web/src/containers/build/BuildOutput.jsx
index a06c3abd8..8e25c0cfd 100644
--- a/web/src/containers/build/BuildOutput.jsx
+++ b/web/src/containers/build/BuildOutput.jsx
@@ -68,21 +68,17 @@ class BuildOutput extends React.Component {
<pre key="msg">{task.msg}</pre>
)}
{task.exception && (
- <pre key="exc">{task.exception}</pre>
+ <pre key="exc" style={{ color: 'red' }}>{task.exception}</pre>
)}
{task.stdout_lines && task.stdout_lines.length > 0 && (
- <span key="stdout" style={{whiteSpace: 'pre'}} title="stdout">
- {task.stdout_lines.slice(-42).map((line, idx) => (
- <span key={idx}>{line}<br/></span>))}
- <br />
- </span>
+ <pre key="stdout" style={{ whiteSpace: 'pre-wrap' }} title="stdout">
+ {task.stdout_lines.slice(-42).join('\n')}
+ </pre>
)}
{task.stderr_lines && task.stderr_lines.length > 0 && (
- <span key="stderr" style={{whiteSpace: 'pre'}} title="stderr">
- {task.stderr_lines.slice(-42).map((line, idx) => (
- <span key={idx}>{line}<br/></span>))}
- <br />
- </span>
+ <pre key="stderr" style={{whiteSpace: 'pre-wrap', color: 'red'}} title="stderr">
+ {task.stderr_lines.slice(-42).join('\n')}
+ </pre>
)}
</Panel.Body>
</Panel>
diff --git a/web/src/containers/job/JobVariant.jsx b/web/src/containers/job/JobVariant.jsx
index 515852a58..2bd5984c1 100644
--- a/web/src/containers/job/JobVariant.jsx
+++ b/web/src/containers/job/JobVariant.jsx
@@ -126,7 +126,7 @@ class JobVariant extends React.Component {
}
if (label === 'variables') {
value = (
- <span style={{whiteSpace: 'pre'}}>
+ <span style={{whiteSpace: 'pre-wrap'}}>
<ReactJson
src={value}
name={null}
@@ -138,7 +138,7 @@ class JobVariant extends React.Component {
}
if (label === 'description') {
const style = {
- whiteSpace: 'pre'
+ whiteSpace: 'pre-wrap'
}
if (descriptionMaxHeight > 0) {
style.minHeight = descriptionMaxHeight
diff --git a/web/src/containers/status/ChangePanel.jsx b/web/src/containers/status/ChangePanel.jsx
index 6600e6e8e..3ef8980ff 100644
--- a/web/src/containers/status/ChangePanel.jsx
+++ b/web/src/containers/status/ChangePanel.jsx
@@ -73,6 +73,24 @@ class ChangePanel extends React.Component {
return <span className={status}>{text}</span>
}
+ jobStrResult (job) {
+ let result = job.result ? job.result.toLowerCase() : null
+ if (result === null) {
+ if (job.url === null) {
+ if (job.queued === false) {
+ result = 'waiting'
+ } else {
+ result = 'queued'
+ }
+ } else if (job.paused !== null && job.paused) {
+ result = 'paused'
+ } else {
+ result = 'in progress'
+ }
+ }
+ return result
+ }
+
renderChangeLink (change) {
let changeId = change.id || 'NA'
let changeTitle = changeId
@@ -106,10 +124,7 @@ class ChangePanel extends React.Component {
return (
<div className='progress zuul-change-total-result'>
{change.jobs.map((job, idx) => {
- let result = job.result ? job.result.toLowerCase() : null
- if (result === null) {
- result = job.url ? 'in progress' : 'queued'
- }
+ let result = this.jobStrResult(job)
if (result !== 'queued') {
let className = ''
switch (result) {
@@ -121,13 +136,15 @@ class ChangePanel extends React.Component {
className = ' progress-bar-danger'
break
case 'unstable':
+ case 'retry_limit':
+ case 'post_failure':
+ case 'node_failure':
className = ' progress-bar-warning'
break
case 'paused':
+ case 'skipped':
className = ' progress-bar-info'
break
- case 'in progress':
- break
default:
break
}
@@ -250,20 +267,7 @@ class ChangePanel extends React.Component {
name = <span className='zuul-job-name'>{job_name}</span>
}
let resultBar
- let result = job.result ? job.result.toLowerCase() : null
- if (result === null) {
- if (job.url === null) {
- if (job.queued === false) {
- result = 'waiting'
- } else {
- result = 'queued'
- }
- } else if (job.paused !== null && job.paused) {
- result = 'paused'
- } else {
- result = 'in progress'
- }
- }
+ let result = this.jobStrResult(job)
if (result === 'in progress') {
resultBar = this.renderJobProgressBar(
job.elapsed_time, job.remaining_time)
diff --git a/web/src/pages/ConfigErrors.jsx b/web/src/pages/ConfigErrors.jsx
index 05aceec5d..aa0f00b46 100644
--- a/web/src/pages/ConfigErrors.jsx
+++ b/web/src/pages/ConfigErrors.jsx
@@ -51,7 +51,7 @@ class ConfigErrorsPage extends React.Component {
return (
<li className="list-group-item" key={idx}>
<h3>{item.source_context.project} - {ctxPath}</h3>
- <p style={{whiteSpace: 'pre'}}>
+ <p style={{whiteSpace: 'pre-wrap'}}>
{item.error}
</p>
</li>
diff --git a/web/src/pages/Status.jsx b/web/src/pages/Status.jsx
index 2328ccadb..ad169e556 100644
--- a/web/src/pages/Status.jsx
+++ b/web/src/pages/Status.jsx
@@ -77,8 +77,8 @@ class StatusPage extends Refreshable {
this.visibilityChangeEvent, this.visibilityListener, false)
}
- setCookie (name, value) {
- document.cookie = name + '=' + value + '; path=/'
+ setCookie (name, value, pathname) {
+ document.cookie = name + '=' + value + '; path=' + pathname
}
updateData = (force) => {
@@ -114,7 +114,7 @@ class StatusPage extends Refreshable {
setFilter = (filter) => {
this.filter.value = filter
this.setState({filter: filter})
- this.setCookie('zuul_filter_string', filter)
+ this.setCookie('zuul_filter_string', filter, window.location.pathname)
}
handleKeyPress = (e) => {
@@ -127,7 +127,7 @@ class StatusPage extends Refreshable {
handleCheckBox = (e) => {
this.setState({expanded: e.target.checked})
- this.setCookie('zuul_expand_by_default', e.target.checked)
+ this.setCookie('zuul_expand_by_default', e.target.checked, '/')
}
loadState = () => {
diff --git a/zuul/ansible/base/callback/zuul_stream.py b/zuul/ansible/base/callback/zuul_stream.py
index ca34dc2ab..7cae6efa5 100644
--- a/zuul/ansible/base/callback/zuul_stream.py
+++ b/zuul/ansible/base/callback/zuul_stream.py
@@ -267,6 +267,9 @@ class CallbackModule(default.CallbackModule):
if task.loop:
# Don't try to stream from loops
continue
+ if play_vars[host].get('ansible_connection') in ('winrm',):
+ # The winrm connections don't support streaming for now
+ continue
if play_vars[host].get('ansible_connection') in ('kubectl', ):
# Stream from the forwarded port on kubectl conns
port = play_vars[host]['zuul']['resources'][
@@ -660,7 +663,7 @@ class CallbackModule(default.CallbackModule):
host=hostname, status=status, msg=msg))
else:
self._log("{host} | {status}".format(
- host=hostname, status=status, msg=msg))
+ host=hostname, status=status))
if result_dict:
result_string = json.dumps(result_dict, indent=2, sort_keys=True)
for line in result_string.split('\n'):
diff --git a/zuul/ansible/logconfig.py b/zuul/ansible/logconfig.py
index 40aa7736c..fb1128110 100644
--- a/zuul/ansible/logconfig.py
+++ b/zuul/ansible/logconfig.py
@@ -106,6 +106,10 @@ _DEFAULT_SERVER_LOGGING_CONFIG = {
'handlers': ['console'],
'level': 'WARN',
},
+ 'alembic.runtime.migration': {
+ 'handlers': ['console'],
+ 'level': 'INFO',
+ },
'alembic': {
'handlers': ['console'],
'level': 'WARN',
diff --git a/zuul/cmd/scheduler.py b/zuul/cmd/scheduler.py
index 82a589091..308118f09 100755
--- a/zuul/cmd/scheduler.py
+++ b/zuul/cmd/scheduler.py
@@ -149,10 +149,17 @@ class Scheduler(zuul.cmd.ZuulDaemonApp):
zookeeper_hosts = get_default(self.config, 'zookeeper', 'hosts', None)
if not zookeeper_hosts:
raise Exception("The zookeeper hosts config value is required")
+ zookeeper_tls_key = get_default(self.config, 'zookeeper', 'tls_key')
+ zookeeper_tls_cert = get_default(self.config, 'zookeeper', 'tls_cert')
+ zookeeper_tls_ca = get_default(self.config, 'zookeeper', 'tls_ca')
zookeeper_timeout = float(get_default(self.config, 'zookeeper',
'session_timeout', 10.0))
-
- zookeeper.connect(zookeeper_hosts, timeout=zookeeper_timeout)
+ zookeeper.connect(
+ zookeeper_hosts,
+ timeout=zookeeper_timeout,
+ tls_cert=zookeeper_tls_cert,
+ tls_key=zookeeper_tls_key,
+ tls_ca=zookeeper_tls_ca)
self.configure_connections()
self.sched.setExecutor(gearman)
@@ -165,7 +172,7 @@ class Scheduler(zuul.cmd.ZuulDaemonApp):
self.sched.start()
self.sched.registerConnections(self.connections)
self.sched.reconfigure(self.config)
- self.sched.resume()
+ self.sched.wakeUp()
except Exception:
self.log.exception("Error starting Zuul:")
# TODO(jeblair): If we had all threads marked as daemon,
diff --git a/zuul/cmd/web.py b/zuul/cmd/web.py
index b992e621c..fe4b2ca4f 100755
--- a/zuul/cmd/web.py
+++ b/zuul/cmd/web.py
@@ -86,6 +86,12 @@ class WebServer(zuul.cmd.ZuulDaemonApp):
self.config, 'zookeeper', 'hosts', None)
if not params["zk_hosts"]:
raise Exception("The zookeeper hosts config value is required")
+ params["zk_tls_key"] = get_default(self.config, 'zookeeper', 'tls_key')
+ params["zk_tls_cert"] = get_default(self.config,
+ 'zookeeper', 'tls_cert')
+ params["zk_tls_ca"] = get_default(self.config, 'zookeeper', 'tls_ca')
+ params["zk_timeout"] = float(get_default(self.config, 'zookeeper',
+ 'session_timeout', 10.0))
try:
self.web = zuul.web.ZuulWeb(**params)
@@ -99,13 +105,7 @@ class WebServer(zuul.cmd.ZuulDaemonApp):
self.log.info('Zuul Web Server starting')
self.web.start()
- try:
- signal.pause()
- except KeyboardInterrupt:
- print("Ctrl + C: asking web server to exit nicely...\n")
- self.exit_handler(signal.SIGINT, None)
-
- self.web.stop()
+ self.web.join()
self.log.info("Zuul Web Server stopped")
def configure_authenticators(self):
diff --git a/zuul/configloader.py b/zuul/configloader.py
index 4eb64a121..f985804c6 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -29,8 +29,10 @@ from zuul.lib import yamlutil as yaml
import zuul.manager.dependent
import zuul.manager.independent
import zuul.manager.supercedent
+import zuul.manager.serial
from zuul.lib import encryption
from zuul.lib.keystorage import KeyStorage
+from zuul.lib.logutil import get_annotated_logger
from zuul.lib.re2util import filter_allowed_disallowed
@@ -1177,6 +1179,7 @@ class PipelineParser(object):
def getSchema(self):
manager = vs.Any('independent',
'dependent',
+ 'serial',
'supercedent')
precedence = vs.Any('normal', 'low', 'high')
@@ -1291,6 +1294,9 @@ class PipelineParser(object):
elif manager_name == 'independent':
manager = zuul.manager.independent.IndependentPipelineManager(
self.pcontext.scheduler, pipeline)
+ elif manager_name == 'serial':
+ manager = zuul.manager.serial.SerialPipelineManager(
+ self.pcontext.scheduler, pipeline)
elif manager_name == 'supercedent':
manager = zuul.manager.supercedent.SupercedentPipelineManager(
self.pcontext.scheduler, pipeline)
@@ -2319,7 +2325,9 @@ class ConfigLoader(object):
def createDynamicLayout(self, tenant, files, ansible_manager,
include_config_projects=False,
- scheduler=None, connections=None):
+ scheduler=None, connections=None,
+ zuul_event_id=None):
+ log = get_annotated_logger(self.log, zuul_event_id)
loading_errors = model.LoadingErrors()
if include_config_projects:
config = model.ParsedConfig()
@@ -2337,7 +2345,7 @@ class ConfigLoader(object):
layout = model.Layout(tenant)
layout.loading_errors = loading_errors
- self.log.debug("Created layout id %s", layout.uuid)
+ log.debug("Created layout id %s", layout.uuid)
if not include_config_projects:
# NOTE: the actual pipeline objects (complete with queues
# and enqueued items) are copied by reference here. This
diff --git a/zuul/driver/auth/jwt.py b/zuul/driver/auth/jwt.py
index f8fb5af01..5df48971b 100644
--- a/zuul/driver/auth/jwt.py
+++ b/zuul/driver/auth/jwt.py
@@ -54,6 +54,16 @@ class JWTAuthenticator(AuthenticatorInterface):
except ValueError:
raise ValueError('"max_validity_time" must be a numerical value')
+ def get_capabilities(self):
+ return {
+ self.realm: {
+ 'authority': self.issuer_id,
+ 'client_id': self.audience,
+ 'type': 'JWT',
+ 'driver': getattr(self, 'name', 'N/A'),
+ }
+ }
+
def _decode(self, rawToken):
raise NotImplementedError
@@ -173,7 +183,7 @@ class OpenIDConnectAuthenticator(JWTAuthenticator):
described in
https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig""" # noqa
- # default algorithm, TOFO: should this be a config param?
+ # default algorithm, TODO: should this be a config param?
algorithm = 'RS256'
name = 'OpenIDConnect'
@@ -234,6 +244,11 @@ class OpenIDConnectAuthenticator(JWTAuthenticator):
msg='There was an error while fetching '
'OpenID configuration, check logs for details')
+ def get_capabilities(self):
+ d = super(OpenIDConnectAuthenticator, self).get_capabilities()
+ d[self.realm]['scope'] = self.scope
+ return d
+
def _decode(self, rawToken):
unverified_headers = jwt.get_unverified_header(rawToken)
key_id = unverified_headers.get('kid', None)
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index a75252ad9..0d0913789 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -48,6 +48,10 @@ from zuul.model import Ref, Tag, Branch, Project
TIMEOUT = 30
+class HTTPConflictException(Exception):
+ message = "Received response 409"
+
+
class GerritChangeData(object):
"""Compatability layer for SSH/HTTP
@@ -613,7 +617,9 @@ class GerritConnection(BaseConnection):
auth=self.auth, timeout=TIMEOUT,
headers={'User-Agent': self.user_agent})
self.iolog.debug('Received: %s %s' % (r.status_code, r.text,))
- if r.status_code != 200:
+ if r.status_code == 409:
+ raise HTTPConflictException()
+ elif r.status_code != 200:
raise Exception("Received response %s" % (r.status_code,))
ret = None
if r.text and len(r.text) > 4:
@@ -637,7 +643,9 @@ class GerritConnection(BaseConnection):
headers={'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.iolog.debug('Received: %s %s' % (r.status_code, r.text,))
- if r.status_code != 200:
+ if r.status_code == 409:
+ raise HTTPConflictException()
+ elif r.status_code != 200:
raise Exception("Received response %s" % (r.status_code,))
ret = None
if r.text and len(r.text) > 4:
@@ -1065,6 +1073,9 @@ class GerritConnection(BaseConnection):
(changeid, change.commit, uuid),
checkinfo)
break
+ except HTTPConflictException:
+ log.exception("Conflict submitting check data to gerrit.")
+ break
except Exception:
log.exception("Error submitting check data to gerrit, "
"attempt %s", x)
@@ -1108,6 +1119,9 @@ class GerritConnection(BaseConnection):
(changeid, change.commit),
data)
break
+ except HTTPConflictException:
+ log.exception("Conflict submitting data to gerrit.")
+ break
except Exception:
log.exception(
"Error submitting data to gerrit, attempt %s", x)
@@ -1117,6 +1131,9 @@ class GerritConnection(BaseConnection):
try:
self.post('changes/%s/submit' % (changeid,), {})
break
+ except HTTPConflictException:
+ log.exception("Conflict submitting data to gerrit.")
+ break
except Exception:
log.exception(
"Error submitting data to gerrit, attempt %s", x)
@@ -1261,7 +1278,9 @@ class GerritConnection(BaseConnection):
auth=self.auth, timeout=TIMEOUT,
headers={'User-Agent': self.user_agent})
self.iolog.debug('Received: %s %s' % (r.status_code, r.text,))
- if r.status_code != 200:
+ if r.status_code == 409:
+ raise HTTPConflictException()
+ elif r.status_code != 200:
raise Exception("Received response %s" % (r.status_code,))
out = r.text[r.text.find('\n') + 5:]
else:
diff --git a/zuul/driver/gerrit/gerritsource.py b/zuul/driver/gerrit/gerritsource.py
index 2da5076cc..a59781d09 100644
--- a/zuul/driver/gerrit/gerritsource.py
+++ b/zuul/driver/gerrit/gerritsource.py
@@ -107,8 +107,8 @@ class GerritSource(BaseSource):
return changes
def getCachedChanges(self):
- for x in self.connection._change_cache.values():
- for y in x.values():
+ for x in list(self.connection._change_cache.values()):
+ for y in list(x.values()):
yield y
def getProject(self, name):
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index 077fb9a63..a054d9e49 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -52,6 +52,16 @@ PREVIEW_JSON_ACCEPT = 'application/vnd.github.machine-man-preview+json'
PREVIEW_DRAFT_ACCEPT = 'application/vnd.github.shadow-cat-preview+json'
PREVIEW_CHECKS_ACCEPT = 'application/vnd.github.antiope-preview+json'
+# NOTE (felix): Using log levels for file comments / annotations is IMHO more
+# convenient than the values Github expects. Having in mind that those comments
+# most probably come from various linters, "info", "warning" and "error"
+# should be more general terms than "notice", "warning" and "failure".
+ANNOTATION_LEVELS = {
+ "info": "notice",
+ "warning": "warning",
+ "error": "failure",
+}
+
def _sign_request(body, secret):
signature = 'sha1=' + hmac.new(
@@ -2011,19 +2021,20 @@ class GithubConnection(BaseConnection):
start_column = rng.get("start_character")
end_column = rng.get("end_character")
- # TODO (felix): Make annotation_level configurable via
- # file_comments in zuul_return. Other reporters like Gerrit
- # might ignore the field if they don't support it.
- # Accepted values are "notice", "warning", "failure".
- # A "failure" annotation won't declare the check run as
- # failure.
+ # Map the level coming from zuul_return to the ones Github
+ # expects. Each Github annotation must provide a level, so
+ # we fall back to "warning" in case no or an invalid level
+ # is provided.
+ annotation_level = ANNOTATION_LEVELS.get(
+ comment.get("level"), "warning"
+ )
# A Github check annotation requires at least the following
# attributes: "path", "start_line", "end_line", "message" and
# "annotation_level"
raw_annotation = {
"path": fn,
- "annotation_level": "warning",
+ "annotation_level": annotation_level,
"message": comment["message"],
"start_line": start_line,
"end_line": end_line,
@@ -2074,6 +2085,17 @@ class GithubConnection(BaseConnection):
statuses.append("%s:%s:%s" % stuple)
seen.append("%s:%s" % (stuple[0], stuple[1]))
+ # Although Github differentiates commit statuses and commit checks via
+    # their respective APIs, the branch protection and the status section
+    # (below the comments of a PR) do not differentiate between the two. Thus,
+ # to mimic this behaviour also in Zuul, a required_status in the
+ # pipeline config could map to either a status or a check.
+ for check in self.getCommitChecks(project.name, sha, event):
+ ctuple = _check_as_tuple(check)
+ if "{}:{}".format(ctuple[0], ctuple[1]) not in seen:
+ statuses.append("{}:{}:{}".format(*ctuple))
+ seen.append("{}:{}".format(ctuple[0], ctuple[1]))
+
return statuses
def getWebController(self, zuul_web):
diff --git a/zuul/driver/github/githubsource.py b/zuul/driver/github/githubsource.py
index 50f85b075..2b647b683 100644
--- a/zuul/driver/github/githubsource.py
+++ b/zuul/driver/github/githubsource.py
@@ -95,7 +95,7 @@ class GithubSource(BaseSource):
return self.connection.getChangesDependingOn(change, projects, tenant)
def getCachedChanges(self):
- return self.connection._change_cache.values()
+ return list(self.connection._change_cache.values())
def getProject(self, name):
p = self.connection.getProject(name)
diff --git a/zuul/driver/gitlab/gitlabsource.py b/zuul/driver/gitlab/gitlabsource.py
index d477adeb7..65033c305 100644
--- a/zuul/driver/gitlab/gitlabsource.py
+++ b/zuul/driver/gitlab/gitlabsource.py
@@ -59,7 +59,7 @@ class GitlabSource(BaseSource):
raise NotImplementedError()
def getCachedChanges(self):
- return self.connection._change_cache.values()
+ return list(self.connection._change_cache.values())
def getProject(self, name):
p = self.connection.getProject(name)
diff --git a/zuul/driver/pagure/pagureconnection.py b/zuul/driver/pagure/pagureconnection.py
index 3d7ff6a9a..becc803c8 100644
--- a/zuul/driver/pagure/pagureconnection.py
+++ b/zuul/driver/pagure/pagureconnection.py
@@ -1,4 +1,4 @@
-# Copyright 2018 Red Hat, Inc.
+# Copyright 2018, 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -38,34 +38,43 @@ from zuul.driver.pagure.paguremodel import PagureTriggerEvent, PullRequest
# Minimal Pagure version supported 5.3.0
#
-# Pagure is similar to Github as it handles Pullrequest where PR is a branch
+# Pagure is similar to Github as it handles PullRequest where PR is a branch
# composed of one or more commits. A PR can be commented, evaluated, updated,
# CI flagged, and merged. A PR can be flagged (success/failure/pending) and
-# this driver use that capability. Code review (evaluation) is done via
-# comments that contains a :thumbsup: or :thumbsdown:. Pagure computes a
-# score based on that and allow or not the merge of PR if the "minimal score to
-# merge" is set in repository settings. This driver uses that setting and need
-# to be set. This driver expects to receive repository events via webhooks and
-# expects to verify payload signature. The driver connection needs an user's
-# API key with the "Modify an existing project" access. This user needs to be
-# added as admin against projects to be gated by Zuul.
+# this driver uses that capability.
+#
+# PR approval can be driven by code review (evaluation). This is done via comments that
+# contains a :thumbsup: or :thumbsdown:. Pagure computes a score based on
+# that and allows or denies the merge of a PR if the "minimal score to merge" is
+# set in repository settings.
+#
+# PR approval can be also driven via PR metadata flag.
+#
+# This driver expects to receive repository events via webhooks and
+# performs event validation based on the source IP address of the event.
+#
+# The driver connection needs a user's API token with
+# - "Merge a pull-request"
+# - "Flag a pull-request"
+# - "Comment on a pull-request"
+#
+# Each project to be integrated with Zuul needs:
#
# The web hook target must be (in repository settings):
# - http://<zuul-web>/zuul/api/connection/<conn-name>/payload
#
# Repository settings (to be checked):
-# - Always merge (Better to match internal merge strategy of Zuul)
-# - Minimum score to merge pull-request
-# - Notify on pull-request flag
+# - Minimum score to merge pull-request = 0 or -1
# - Pull requests
+# - Open metadata access to all (unchecked if approval)
#
# To define the connection in /etc/zuul/zuul.conf:
-# [connection pagure.sftests.com]
+# [connection pagure.io]
# driver=pagure
-# server=pagure.sftests.com
-# baseurl=https://pagure.sftests.com/pagure
-# cloneurl=https://pagure.sftests.com/pagure/git
+# server=pagure.io
+# baseurl=https://pagure.io
# api_token=QX29SXAW96C2CTLUNA5JKEEU65INGWTO2B5NHBDBRMF67S7PYZWCS0L1AKHXXXXX
+# source_whitelist=8.43.85.75
#
# Current Non blocking issues:
# - Pagure does not reset the score when a PR code is updated
@@ -85,9 +94,6 @@ from zuul.driver.pagure.paguremodel import PagureTriggerEvent, PullRequest
# https://docs.pagure.org/pagure/usage/project_settings.html?highlight=score#activate-only-assignee-can-merge-pull-request
-TOKEN_VALIDITY = 60 * 24 * 3600
-
-
def _sign_request(body, secret):
signature = hmac.new(
secret.encode('utf-8'), body, hashlib.sha1).hexdigest()
@@ -199,6 +205,7 @@ class PagureEventConnector(threading.Thread):
'pull-request.closed': self._event_pull_request_closed,
'pull-request.new': self._event_pull_request,
'pull-request.flag.added': self._event_flag_added,
+ 'pull-request.flag.updated': self._event_flag_added,
'git.receive': self._event_ref_updated,
'git.branch.creation': self._event_ref_created,
'git.branch.deletion': self._event_ref_deleted,
@@ -400,33 +407,22 @@ class PagureAPIClient():
log = logging.getLogger("zuul.PagureAPIClient")
def __init__(
- self, baseurl, api_token, project, token_exp_date=None):
+ self, baseurl, api_token, project):
self.session = requests.Session()
self.base_url = '%s/api/0/' % baseurl
self.api_token = api_token
self.project = project
self.headers = {'Authorization': 'token %s' % self.api_token}
- self.token_exp_date = token_exp_date
def _manage_error(self, data, code, url, verb):
if code < 400:
return
else:
- if data.get('error_code', '') == 'EINVALIDTOK':
- # Reset the expiry date of the cached API client
- # to force the driver to refresh connectors
- self.token_exp_date = int(time.time())
raise PagureAPIClientException(
"Unable to %s on %s (code: %s) due to: %s" % (
verb, url, code, data
))
- def is_expired(self):
- if self.token_exp_date:
- if int(time.time()) > (self.token_exp_date - 3600):
- return True
- return False
-
def get(self, url):
self.log.debug("Getting resource %s ..." % url)
ret = self.session.get(url, headers=self.headers)
@@ -442,6 +438,12 @@ class PagureAPIClient():
ret.status_code, ret.text))
return ret.json(), ret.status_code, ret.url, 'POST'
+ def whoami(self):
+ path = '-/whoami'
+ resp = self.post(self.base_url + path)
+ self._manage_error(*resp)
+ return resp[0]['username']
+
def get_project_branches(self):
path = '%s/git/branches' % self.project
resp = self.get(self.base_url + path)
@@ -460,23 +462,29 @@ class PagureAPIClient():
self._manage_error(*resp)
return resp[0]
- def get_pr_flags(self, number, last=False):
+ def get_pr_flags(self, number, owner, last=False):
path = '%s/pull-request/%s/flag' % (self.project, number)
resp = self.get(self.base_url + path)
self._manage_error(*resp)
data = resp[0]
+ owned_flags = [
+ flag for flag in data['flags']
+ if flag['user']['name'] == owner]
if last:
- if data['flags']:
- return data['flags'][0]
+ if owned_flags:
+ return owned_flags[0]
else:
return {}
else:
- return data['flags']
+ return owned_flags
- def set_pr_flag(self, number, status, url, description):
+ def set_pr_flag(
+ self, number, status, url, description, app_name, username):
+ flag_uid = "%s-%s-%s" % (username, number, self.project)
params = {
- "username": "Zuul",
+ "username": app_name,
"comment": "Jobs result is %s" % status,
+ "uid": flag_uid[:32],
"status": status,
"url": url}
path = '%s/pull-request/%s/flag' % (self.project, number)
@@ -497,78 +505,18 @@ class PagureAPIClient():
self._manage_error(*resp)
return resp[0]
- def create_project_api_token(self):
- """ A project admin user's api token must be use with that endpoint
+ def get_webhook_token(self):
+ """ A project collaborator's api token must be used with that endpoint
"""
- param = {
- "description": "zuul-token-%s" % int(time.time()),
- "acls": [
- "pull_request_merge", "pull_request_comment",
- "pull_request_flag"]
- }
- path = '%s/token/new' % self.project
- resp = self.post(self.base_url + path, param)
- self._manage_error(*resp)
- # {"token": {"description": "mytoken", "id": "IED2HC...4QIXS6WPZDTET"}}
- return resp[0]['token']
-
- def get_connectors(self):
- """ A project admin user's api token must be use with that endpoint
- """
- def get_token_epoch(token):
- return int(token['description'].split('-')[-1])
-
- path = '%s/connector' % self.project
+ path = '%s/webhook/token' % self.project
resp = self.get(self.base_url + path)
- if resp[1] >= 400:
- # Admin API token is probably invalid or expired
- self.log.error(
- ("Unable to get connectors for project %s probably due to "
- "an invalid or expired admin API token: %s") % (
- self.project, resp[0]))
- # Allow to process but return empty project API and webhook token
- # Web hook events for the related project will be denied and
- # POST on the API will be denied as well.
- return {"id": "", "created_at": int(time.time())}, ""
- data = resp[0]
- # {"connector": {
- # "hook_token": "WCL92MLWMRPGKBQ5LI0LZCSIS4TRQMHR0Q",
- # "api_tokens": [
- # {
- # "description": "zuul-token-123",
- # "expired": false,
- # "id": "X03J4DOJT7P3G4....3DNPPXN4G144BBIAJ"
- # }
- # ]
- # }}
- # Filter expired tokens
- tokens = [
- token for token in data['connector'].get('api_tokens', {})
- if not token['expired']]
- # Now following the pattern zuul-token-{epoch} find the last
- # one created
- api_token = None
- for token in tokens:
- if not token['description'].startswith('zuul-token-'):
- continue
- epoch = get_token_epoch(token)
- if api_token:
- if epoch > get_token_epoch(api_token):
- api_token = token
- else:
- api_token = token
- if not api_token:
- # Let's create one
- api_token = self.create_project_api_token()
- api_token['created_at'] = get_token_epoch(api_token)
- webhook_token = data['connector']['hook_token']
- return api_token, webhook_token
+ self._manage_error(*resp)
+ return resp[0]['webhook']['token']
class PagureConnection(BaseConnection):
driver_name = 'pagure'
log = logging.getLogger("zuul.PagureConnection")
- payload_path = 'payload'
def __init__(self, driver, connection_name, connection_config):
super(PagureConnection, self).__init__(
@@ -580,12 +528,17 @@ class PagureConnection(BaseConnection):
self.canonical_hostname = self.connection_config.get(
'canonical_hostname', self.server)
self.git_ssh_key = self.connection_config.get('sshkey')
- self.admin_api_token = self.connection_config.get('api_token')
+ self.api_token = self.connection_config.get('api_token')
+ self.app_name = self.connection_config.get(
+ 'app_name', 'Zuul')
+ self.username = None
self.baseurl = self.connection_config.get(
'baseurl', 'https://%s' % self.server).rstrip('/')
self.cloneurl = self.connection_config.get(
'cloneurl', self.baseurl).rstrip('/')
- self.connectors = {}
+ self.source_whitelist = self.connection_config.get(
+ 'source_whitelist', '').split(',')
+ self.webhook_tokens = {}
self.source = driver.getSource(self)
self.event_queue = queue.Queue()
self.metadata_notif = re.compile(
@@ -623,45 +576,31 @@ class PagureConnection(BaseConnection):
def eventDone(self):
self.event_queue.task_done()
- def _refresh_project_connectors(self, project):
- pagure = PagureAPIClient(
- self.baseurl, self.admin_api_token, project)
- api_token, webhook_token = pagure.get_connectors()
- connector = self.connectors.setdefault(
- project, {'api_client': None, 'webhook_token': None})
- api_token_exp_date = api_token['created_at'] + TOKEN_VALIDITY
- connector['api_client'] = PagureAPIClient(
- self.baseurl, api_token['id'], project,
- token_exp_date=api_token_exp_date)
- connector['webhook_token'] = webhook_token
- return connector
-
- def get_project_webhook_token(self, project):
- token = self.connectors.get(
- project, {}).get('webhook_token', None)
- if token:
+ def set_my_username(self, client):
+ self.log.debug("Fetching my username ...")
+ self.username = client.whoami()
+ self.log.debug("My username is %s" % self.username)
+
+ def get_project_api_client(self, project):
+ self.log.debug("Building project %s api_client" % project)
+ client = PagureAPIClient(self.baseurl, self.api_token, project)
+ if not self.username:
+ self.set_my_username(client)
+ return client
+
+ def get_project_webhook_token(self, project, force_refresh=False):
+ token = self.webhook_tokens.get(project)
+ if token and not force_refresh:
self.log.debug(
- "Fetching project %s webhook_token from cache" % project)
+ "Fetching project %s webhook token from cache" % project)
return token
else:
+ pagure = self.get_project_api_client(project)
+ token = pagure.get_webhook_token()
+ self.webhook_tokens[project] = token
self.log.debug(
- "Fetching project %s webhook_token from API" % project)
- return self._refresh_project_connectors(project)['webhook_token']
-
- def get_project_api_client(self, project):
- api_client = self.connectors.get(
- project, {}).get('api_client', None)
- if api_client:
- if not api_client.is_expired():
- self.log.debug(
- "Fetching project %s api_client from cache" % project)
- return api_client
- else:
- self.log.debug(
- "Project %s api token is expired (expiration date %s)" % (
- project, api_client.token_exp_date))
- self.log.debug("Building project %s api_client" % project)
- return self._refresh_project_connectors(project)['api_client']
+ "Fetching project %s webhook token from API" % project)
+ return token
def maintainCache(self, relevant):
remove = set()
@@ -779,7 +718,7 @@ class PagureConnection(BaseConnection):
def _hasRequiredStatusChecks(self, change):
pagure = self.get_project_api_client(change.project.name)
- flag = pagure.get_pr_flags(change.number, last=True)
+ flag = pagure.get_pr_flags(change.number, self.username, last=True)
return True if flag.get('status', '') == 'success' else False
def canMerge(self, change, allow_needs, event=None):
@@ -887,14 +826,15 @@ class PagureConnection(BaseConnection):
def setCommitStatus(self, project, number, state, url='',
description='', context=''):
pagure = self.get_project_api_client(project)
- pagure.set_pr_flag(number, state, url, description)
+ pagure.set_pr_flag(
+ number, state, url, description, self.app_name, self.username)
self.log.info("Set pull-request CI flag status : %s" % description)
# Wait for 1 second as flag timestamp is by second
time.sleep(1)
def getCommitStatus(self, project, number):
pagure = self.get_project_api_client(project)
- flag = pagure.get_pr_flags(number, last=True)
+ flag = pagure.get_pr_flags(number, self.username, last=True)
self.log.info(
"Got pull-request CI status for PR %s on %s status: %s" % (
number, project, flag.get('status')))
@@ -928,30 +868,42 @@ class PagureWebController(BaseWebController):
self.connection = connection
self.zuul_web = zuul_web
- def _validate_signature(self, body, headers):
- try:
- request_signature = headers['x-pagure-signature']
- except KeyError:
- raise cherrypy.HTTPError(401, 'x-pagure-signature header missing.')
-
- project = headers['x-pagure-project']
- token = self.connection.get_project_webhook_token(project)
- if not token:
- raise cherrypy.HTTPError(
- 401, 'no webhook token for %s.' % project)
+ def _source_whitelisted(self, remote_ip, forwarded_ip):
+ if remote_ip and remote_ip in self.connection.source_whitelist:
+ return True
+ if forwarded_ip and forwarded_ip in self.connection.source_whitelist:
+ return True
+ def _validate(self, body, token, request_signature):
signature, payload = _sign_request(body, token)
-
if not hmac.compare_digest(str(signature), str(request_signature)):
- self.log.debug(
+ self.log.info(
"Missmatch (Payload Signature: %s, Request Signature: %s)" % (
signature, request_signature))
+ return False
+ return True
+
+ def _validate_signature(self, body, headers):
+ try:
+ request_signature = headers['x-pagure-signature']
+ except KeyError:
raise cherrypy.HTTPError(
- 401,
- 'Request signature does not match calculated payload '
- 'signature. Check that secret is correct.')
+ 401, 'x-pagure-signature header missing.')
- return payload
+ project = headers['x-pagure-project']
+ token = self.connection.get_project_webhook_token(project)
+ if not self._validate(body, token, request_signature):
+ # Give a second attempt as a token could have been
+ # re-generated server side. Refresh the token then retry.
+ self.log.info(
+ "Refresh cached webhook token and re-check signature")
+ token = self.connection.get_project_webhook_token(
+ project, force_refresh=True)
+ if not self._validate(body, token, request_signature):
+ raise cherrypy.HTTPError(
+ 401,
+ 'Request signature does not match calculated payload '
+ 'signature. Check that secret is correct.')
@cherrypy.expose
@cherrypy.tools.json_out(content_type='application/json; charset=utf-8')
@@ -961,9 +913,15 @@ class PagureWebController(BaseWebController):
for key, value in cherrypy.request.headers.items():
headers[key.lower()] = value
body = cherrypy.request.body.read()
- payload = self._validate_signature(body, headers)
- json_payload = json.loads(payload.decode('utf-8'))
+ if not self._source_whitelisted(
+ getattr(cherrypy.request.remote, 'ip'),
+ headers.get('x-forwarded-for')):
+ self._validate_signature(body, headers)
+ else:
+ self.log.info(
+ "Payload origin IP address whitelisted. Skip verify")
+ json_payload = json.loads(body.decode('utf-8'))
job = self.zuul_web.rpc.submitJob(
'pagure:%s:payload' % self.connection.connection_name,
{'payload': json_payload})
diff --git a/zuul/driver/pagure/paguresource.py b/zuul/driver/pagure/paguresource.py
index 40862d307..8fbb50bac 100644
--- a/zuul/driver/pagure/paguresource.py
+++ b/zuul/driver/pagure/paguresource.py
@@ -90,7 +90,7 @@ class PagureSource(BaseSource):
change, projects, tenant)
def getCachedChanges(self):
- return self.connection._change_cache.values()
+ return list(self.connection._change_cache.values())
def getProject(self, name):
p = self.connection.getProject(name)
diff --git a/zuul/driver/sql/__init__.py b/zuul/driver/sql/__init__.py
index 3c91b5224..955a58a99 100644
--- a/zuul/driver/sql/__init__.py
+++ b/zuul/driver/sql/__init__.py
@@ -15,6 +15,7 @@
from zuul.driver import Driver, ConnectionInterface, ReporterInterface
from zuul.driver.sql import sqlconnection
from zuul.driver.sql import sqlreporter
+from zuul.lib import capabilities as cpb
class SQLDriver(Driver, ConnectionInterface, ReporterInterface):
@@ -22,6 +23,8 @@ class SQLDriver(Driver, ConnectionInterface, ReporterInterface):
def __init__(self):
self.tenant_connections = {}
+ cpb.capabilities_registry.register_capabilities(
+ 'job_history', True)
def reconfigure(self, tenant):
# NOTE(corvus): This stores the connection of the first
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 24e93f4fe..041bc7b69 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -19,6 +19,7 @@ import json
import logging
import multiprocessing
import os
+import psutil
import shutil
import signal
import shlex
@@ -2117,7 +2118,9 @@ class AnsibleJob(object):
def runAnsible(self, cmd, timeout, playbook, ansible_version,
wrapped=True, cleanup=False):
config_file = playbook.ansible_config
- env_copy = os.environ.copy()
+ env_copy = {key: value
+ for key, value in os.environ.copy().items()
+ if not key.startswith("ZUUL_")}
env_copy.update(self.ssh_agent.env)
if self.ara_callbacks:
env_copy['ARA_LOG_CONFIG'] = self.jobdir.logging_json
@@ -2200,7 +2203,7 @@ class AnsibleJob(object):
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
- preexec_fn=os.setsid,
+ start_new_session=True,
env=env_copy,
)
@@ -2229,17 +2232,22 @@ class AnsibleJob(object):
line = line[:1024].rstrip()
ansible_log.debug("Ansible output: %s" % (line,))
self.log.debug("Ansible output terminated")
- cpu_times = self.proc.cpu_times()
- self.log.debug("Ansible cpu times: user=%.2f, system=%.2f, "
- "children_user=%.2f, "
- "children_system=%.2f" %
- (cpu_times.user, cpu_times.system,
- cpu_times.children_user,
- cpu_times.children_system))
- self.cpu_times['user'] += cpu_times.user
- self.cpu_times['system'] += cpu_times.system
- self.cpu_times['children_user'] += cpu_times.children_user
- self.cpu_times['children_system'] += cpu_times.children_system
+ try:
+ cpu_times = self.proc.cpu_times()
+ self.log.debug("Ansible cpu times: user=%.2f, system=%.2f, "
+ "children_user=%.2f, "
+ "children_system=%.2f" %
+ (cpu_times.user, cpu_times.system,
+ cpu_times.children_user,
+ cpu_times.children_system))
+ self.cpu_times['user'] += cpu_times.user
+ self.cpu_times['system'] += cpu_times.system
+ self.cpu_times['children_user'] += cpu_times.children_user
+ self.cpu_times['children_system'] += cpu_times.children_system
+ except psutil.NoSuchProcess:
+ self.log.warn("Cannot get cpu_times for process %d. Is your"
+ "/proc mounted with hidepid=2"
+ " on an old linux kernel?", self.proc.pid)
ret = self.proc.wait()
self.log.debug("Ansible exit code: %s" % (ret,))
finally:
diff --git a/zuul/lib/ansible-config.conf b/zuul/lib/ansible-config.conf
index c311c52ca..e8920cb48 100644
--- a/zuul/lib/ansible-config.conf
+++ b/zuul/lib/ansible-config.conf
@@ -8,6 +8,7 @@ deprecated = true
requirements = ansible>=2.6,<2.7
[2.7]
+deprecated = true
requirements = ansible>=2.7,<2.8
[2.8]
diff --git a/zuul/lib/ansible.py b/zuul/lib/ansible.py
index f5eb7e0e8..df0ed0213 100644
--- a/zuul/lib/ansible.py
+++ b/zuul/lib/ansible.py
@@ -202,29 +202,54 @@ class AnsibleManager:
for future in concurrent.futures.as_completed(futures):
future.result()
+ def _validate_ansible(self, version):
+ result = True
+ try:
+ command = [
+ self.getAnsibleCommand(version, 'ansible'),
+ '--version',
+ ]
+
+ ret = subprocess.run(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ check=True)
+ self.log.info('Ansible version %s information: \n%s',
+ version, ret.stdout.decode())
+ except subprocess.CalledProcessError:
+ result = False
+ self.log.exception("Ansible version %s not working" % version)
+ except Exception:
+ result = False
+ self.log.exception(
+ 'Ansible version %s not installed' % version)
+ return result
+
+ def _validate_packages(self, version):
+ result = True
+ try:
+ extra_packages = self._getAnsible(version).extra_packages
+ python_package_check = \
+ "import pkg_resources; pkg_resources.require({})".format(
+ repr(extra_packages))
+
+ command = [self.getAnsibleCommand(version, 'python'),
+ '-c', python_package_check]
+ subprocess.run(command, check=True)
+ except Exception:
+ result = False
+ self.log.exception(
+ 'Ansible version %s installation is missing packages' %
+ version)
+ return result
+
def validate(self):
result = True
for version in self._supported_versions:
- try:
- command = [
- self.getAnsibleCommand(version, 'ansible'),
- '--version',
- ]
-
- ret = subprocess.run(command,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- check=True)
- self.log.info('Ansible version %s information: \n%s',
- version, ret.stdout.decode())
- except subprocess.CalledProcessError:
+ if not self._validate_ansible(version):
result = False
- self.log.exception("Ansible version %s not working" % version)
- except Exception:
+ elif not self._validate_packages(version):
result = False
- self.log.exception(
- 'Ansible version %s not installed' % version)
-
return result
def _getAnsible(self, version):
diff --git a/zuul/lib/auth.py b/zuul/lib/auth.py
index 3e96fd140..effb4bc5e 100644
--- a/zuul/lib/auth.py
+++ b/zuul/lib/auth.py
@@ -19,6 +19,7 @@ import jwt
from zuul import exceptions
import zuul.driver.auth.jwt as auth_jwt
+import zuul.lib.capabilities as cpb
"""AuthN/AuthZ related library, used by zuul-web."""
@@ -34,6 +35,8 @@ class AuthenticatorRegistry(object):
self.default_realm = None
def configure(self, config):
+ capabilities = {'realms': {}}
+ first_realm = None
for section_name in config.sections():
auth_match = re.match(r'^auth ([\'\"]?)(.*)(\1)$',
section_name, re.I)
@@ -54,10 +57,21 @@ class AuthenticatorRegistry(object):
auth_name))
# TODO catch config specific errors (missing fields)
self.authenticators[auth_name] = driver(**auth_config)
+ caps = self.authenticators[auth_name].get_capabilities()
+ # TODO there should be a bijective relationship between realms and
+ # authenticators. This should be enforced at config parsing.
+ capabilities['realms'].update(caps)
+ if first_realm is None:
+ first_realm = auth_config.get('realm', None)
if auth_config.get('default', 'false').lower() == 'true':
self.default_realm = auth_config.get('realm', 'DEFAULT')
- if self.default_realm is None:
- self.default_realm = 'DEFAULT'
+ # do we have any auth defined?
+ if len(capabilities['realms'].keys()) > 0:
+ if self.default_realm is None:
+ # pick arbitrarily the first defined realm
+ self.default_realm = first_realm
+ capabilities['default_realm'] = self.default_realm
+ cpb.capabilities_registry.register_capabilities('auth', capabilities)
def authenticate(self, rawToken):
unverified = jwt.decode(rawToken, verify=False)
diff --git a/zuul/lib/capabilities.py b/zuul/lib/capabilities.py
new file mode 100644
index 000000000..96c10905b
--- /dev/null
+++ b/zuul/lib/capabilities.py
@@ -0,0 +1,51 @@
+# Copyright 2020 OpenStack Foundation
+# Copyright 2020 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import logging
+
+
+"""Simple Capabilities registry, to be used by Zuul Web."""
+
+
+class CapabilitiesRegistry(object):
+
+ log = logging.getLogger("Zuul.CapabilitiesRegistry")
+
+ def __init__(self):
+ self.capabilities = {}
+ self.set_default_capabilities()
+
+ def set_default_capabilities(self):
+ self.capabilities['job_history'] = False
+ self.capabilities['auth'] = {
+ 'realms': {},
+ 'default_realm': None,
+ }
+
+ def register_capabilities(self, capability_name, capabilities):
+ is_set = self.capabilities.setdefault(capability_name, None)
+ if is_set is None:
+ action = 'registered'
+ else:
+ action = 'updated'
+ if isinstance(is_set, dict) and isinstance(capabilities, dict):
+ self.capabilities[capability_name].update(capabilities)
+ else:
+ self.capabilities[capability_name] = capabilities
+ self.log.debug('Capabilities "%s" %s' % (capability_name, action))
+
+
+capabilities_registry = CapabilitiesRegistry()
diff --git a/zuul/lib/filecomments.py b/zuul/lib/filecomments.py
index a05fc8bf3..fe8399a0d 100644
--- a/zuul/lib/filecomments.py
+++ b/zuul/lib/filecomments.py
@@ -21,7 +21,8 @@ FILE_COMMENT = {
'start_character': int,
'end_line': int,
'end_character': int,
- }
+ },
+ 'level': str,
}
FILE_COMMENTS = {str: [FILE_COMMENT]}
diff --git a/zuul/lib/yamlutil.py b/zuul/lib/yamlutil.py
index 2c84b06ae..c2356da2d 100644
--- a/zuul/lib/yamlutil.py
+++ b/zuul/lib/yamlutil.py
@@ -14,14 +14,15 @@ from yaml import YAMLObject, YAMLError # noqa: F401
try:
# Explicit type ignore to deal with provisional import failure
+ # Details at https://github.com/python/mypy/issues/1153
from yaml import cyaml # type: ignore
import _yaml
SafeLoader = cyaml.CSafeLoader
SafeDumper = cyaml.CSafeDumper
Mark = _yaml.Mark
except ImportError:
- SafeLoader = yaml.SafeLoader
- SafeDumper = yaml.SafeDumper
+ SafeLoader = yaml.SafeLoader # type: ignore
+ SafeDumper = yaml.SafeDumper # type: ignore
Mark = yaml.Mark
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 836b7bb50..ee0f2c241 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -13,6 +13,7 @@
import logging
import textwrap
import urllib
+from abc import ABCMeta
from zuul import exceptions
from zuul import model
@@ -43,7 +44,7 @@ class StaticChangeQueueContextManager(object):
pass
-class PipelineManager(object):
+class PipelineManager(metaclass=ABCMeta):
"""Abstract Base Class for enqueing and processing Changes in a Pipeline"""
def __init__(self, sched, pipeline):
@@ -520,13 +521,14 @@ class PipelineManager(object):
return relevant_errors
def _loadDynamicLayout(self, item):
+ log = get_annotated_logger(self.log, item.event)
# Load layout
# Late import to break an import loop
import zuul.configloader
loader = zuul.configloader.ConfigLoader(
self.sched.connections, self.sched, None, None)
- self.log.debug("Loading dynamic layout")
+ log.debug("Loading dynamic layout")
(trusted_updates, untrusted_updates) = item.includesConfigUpdates()
build_set = item.current_build_set
@@ -540,23 +542,25 @@ class PipelineManager(object):
# catch syntax errors in config repos even though we won't
# actually run with that config.
if trusted_updates:
- self.log.debug("Loading dynamic layout (phase 1)")
+ log.debug("Loading dynamic layout (phase 1)")
trusted_layout = loader.createDynamicLayout(
item.pipeline.tenant,
build_set.files,
self.sched.ansible_manager,
- include_config_projects=True)
+ include_config_projects=True,
+ zuul_event_id=None)
trusted_errors = len(trusted_layout.loading_errors) > 0
# Then create the config a second time but without changes
# to config repos so that we actually use this config.
if untrusted_updates:
- self.log.debug("Loading dynamic layout (phase 2)")
+ log.debug("Loading dynamic layout (phase 2)")
untrusted_layout = loader.createDynamicLayout(
item.pipeline.tenant,
build_set.files,
self.sched.ansible_manager,
- include_config_projects=False)
+ include_config_projects=False,
+ zuul_event_id=None)
untrusted_errors = len(untrusted_layout.loading_errors) > 0
# Configuration state handling switchboard. Intentionally verbose
@@ -567,12 +571,12 @@ class PipelineManager(object):
# No errors found at all use dynamic untrusted layout
if (trusted_layout and not trusted_errors and
untrusted_layout and not untrusted_errors):
- self.log.debug("Loading dynamic layout complete")
+ log.debug("Loading dynamic layout complete")
return untrusted_layout
# No errors in untrusted only layout update
elif (not trusted_layout and
untrusted_layout and not untrusted_errors):
- self.log.debug("Loading dynamic layout complete")
+ log.debug("Loading dynamic layout complete")
return untrusted_layout
# No errors in trusted only layout update
elif (not untrusted_layout and
@@ -580,12 +584,12 @@ class PipelineManager(object):
# We're a change to a config repo (with no untrusted
# config items ahead), so just use the current pipeline
# layout.
- self.log.debug("Loading dynamic layout complete")
+ log.debug("Loading dynamic layout complete")
return item.queue.pipeline.tenant.layout
# Untrusted layout only works with trusted updates
elif (trusted_layout and not trusted_errors and
untrusted_layout and untrusted_errors):
- self.log.info("Configuration syntax error in dynamic layout")
+ log.info("Configuration syntax error in dynamic layout")
# The config is good if we include config-projects,
# but is currently invalid if we omit them. Instead
# of returning the whole error message, just leave a
@@ -610,7 +614,7 @@ class PipelineManager(object):
if relevant_errors:
item.setConfigErrors(relevant_errors)
return None
- self.log.info(
+ log.info(
"Configuration syntax error not related to "
"change context. Error won't be reported.")
return untrusted_layout
@@ -624,7 +628,7 @@ class PipelineManager(object):
if relevant_errors:
item.setConfigErrors(relevant_errors)
return None
- self.log.info(
+ log.info(
"Configuration syntax error not related to "
"change context. Error won't be reported.")
# We're a change to a config repo with errors not relevant
@@ -635,7 +639,7 @@ class PipelineManager(object):
"not accounted for.")
except Exception:
- self.log.exception("Error in dynamic layout")
+ log.exception("Error in dynamic layout")
item.setConfigError("Unknown configuration error")
return None
@@ -649,6 +653,9 @@ class PipelineManager(object):
def getLayout(self, item):
if item.item_ahead:
fallback_layout = item.item_ahead.layout
+ if fallback_layout is None:
+ # We're probably waiting on a merge job for the item ahead.
+ return None
else:
fallback_layout = item.pipeline.tenant.layout
if not item.change.updatesConfig(item.pipeline.tenant):
diff --git a/zuul/manager/dependent.py b/zuul/manager/dependent.py
index 4aa96c176..cf8c7f8ba 100644
--- a/zuul/manager/dependent.py
+++ b/zuul/manager/dependent.py
@@ -12,11 +12,10 @@
from zuul import model
from zuul.lib.logutil import get_annotated_logger
-from zuul.manager import PipelineManager, StaticChangeQueueContextManager
-from zuul.manager import DynamicChangeQueueContextManager
+from zuul.manager.shared import SharedQueuePipelineManager
-class DependentPipelineManager(PipelineManager):
+class DependentPipelineManager(SharedQueuePipelineManager):
"""PipelineManager for handling interrelated Changes.
The DependentPipelineManager puts Changes that share a Pipeline
@@ -29,69 +28,17 @@ class DependentPipelineManager(PipelineManager):
def __init__(self, *args, **kwargs):
super(DependentPipelineManager, self).__init__(*args, **kwargs)
- def buildChangeQueues(self, layout):
- self.log.debug("Building shared change queues")
- change_queues = {}
- tenant = self.pipeline.tenant
- layout_project_configs = layout.project_configs
-
- for project_name, project_configs in layout_project_configs.items():
- (trusted, project) = tenant.getProject(project_name)
- queue_name = None
- project_in_pipeline = False
- for project_config in layout.getAllProjectConfigs(project_name):
- project_pipeline_config = project_config.pipelines.get(
- self.pipeline.name)
- if project_pipeline_config is None:
- continue
- project_in_pipeline = True
- queue_name = project_pipeline_config.queue_name
- if queue_name:
- break
- if not project_in_pipeline:
- continue
- if queue_name and queue_name in change_queues:
- change_queue = change_queues[queue_name]
- else:
- p = self.pipeline
- change_queue = model.ChangeQueue(
- p,
- window=p.window,
- window_floor=p.window_floor,
- window_increase_type=p.window_increase_type,
- window_increase_factor=p.window_increase_factor,
- window_decrease_type=p.window_decrease_type,
- window_decrease_factor=p.window_decrease_factor,
- name=queue_name)
- if queue_name:
- # If this is a named queue, keep track of it in
- # case it is referenced again. Otherwise, it will
- # have a name automatically generated from its
- # constituent projects.
- change_queues[queue_name] = change_queue
- self.pipeline.addQueue(change_queue)
- self.log.debug("Created queue: %s" % change_queue)
- change_queue.addProject(project)
- self.log.debug("Added project %s to queue: %s" %
- (project, change_queue))
-
- def getChangeQueue(self, change, event, existing=None):
- log = get_annotated_logger(self.log, event)
-
- # Ignore the existing queue, since we can always get the correct queue
- # from the pipeline. This avoids enqueuing changes in a wrong queue
- # e.g. during re-configuration.
- queue = self.pipeline.getQueue(change.project)
- if queue:
- return StaticChangeQueueContextManager(queue)
- else:
- # There is no existing queue for this change. Create a
- # dynamic one for this one change's use
- change_queue = model.ChangeQueue(self.pipeline, dynamic=True)
- change_queue.addProject(change.project)
- self.pipeline.addQueue(change_queue)
- log.debug("Dynamically created queue %s", change_queue)
- return DynamicChangeQueueContextManager(change_queue)
+ def constructChangeQueue(self, queue_name):
+ p = self.pipeline
+ return model.ChangeQueue(
+ p,
+ window=p.window,
+ window_floor=p.window_floor,
+ window_increase_type=p.window_increase_type,
+ window_increase_factor=p.window_increase_factor,
+ window_decrease_type=p.window_decrease_type,
+ window_decrease_factor=p.window_decrease_factor,
+ name=queue_name)
def getNodePriority(self, item):
with self.getChangeQueue(item.change, item.event) as change_queue:
diff --git a/zuul/manager/serial.py b/zuul/manager/serial.py
new file mode 100644
index 000000000..caaa147c6
--- /dev/null
+++ b/zuul/manager/serial.py
@@ -0,0 +1,37 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from zuul import model
+from zuul.manager.shared import SharedQueuePipelineManager
+
+
+class SerialPipelineManager(SharedQueuePipelineManager):
+ """PipelineManager with shared queues and a window of 1"""
+
+ changes_merge = False
+
+ def constructChangeQueue(self, queue_name):
+ return model.ChangeQueue(
+ self.pipeline,
+ window=1,
+ window_floor=1,
+ window_increase_type='none',
+ window_decrease_type='none',
+ name=queue_name)
+
+ def dequeueItem(self, item):
+ super(SerialPipelineManager, self).dequeueItem(item)
+ # If this was a dynamic queue from a speculative change,
+ # remove the queue (if empty)
+ if item.queue.dynamic:
+ if not item.queue.queue:
+ self.pipeline.removeQueue(item.queue)
diff --git a/zuul/manager/shared.py b/zuul/manager/shared.py
new file mode 100644
index 000000000..4b286f4f1
--- /dev/null
+++ b/zuul/manager/shared.py
@@ -0,0 +1,84 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from abc import ABCMeta
+
+from zuul import model
+from zuul.lib.logutil import get_annotated_logger
+from zuul.manager import PipelineManager, StaticChangeQueueContextManager
+from zuul.manager import DynamicChangeQueueContextManager
+
+
+class SharedQueuePipelineManager(PipelineManager, metaclass=ABCMeta):
+ """Intermediate class that adds the shared-queue behavior.
+
+ This is not a full pipeline manager; it just adds the shared-queue
+ behavior to the base class and is used by the dependent and serial
+ managers.
+ """
+
+ changes_merge = False
+
+ def buildChangeQueues(self, layout):
+ self.log.debug("Building shared change queues")
+ change_queues = {}
+ tenant = self.pipeline.tenant
+ layout_project_configs = layout.project_configs
+
+ for project_name, project_configs in layout_project_configs.items():
+ (trusted, project) = tenant.getProject(project_name)
+ queue_name = None
+ project_in_pipeline = False
+ for project_config in layout.getAllProjectConfigs(project_name):
+ project_pipeline_config = project_config.pipelines.get(
+ self.pipeline.name)
+ if project_pipeline_config is None:
+ continue
+ project_in_pipeline = True
+ queue_name = project_pipeline_config.queue_name
+ if queue_name:
+ break
+ if not project_in_pipeline:
+ continue
+ if queue_name and queue_name in change_queues:
+ change_queue = change_queues[queue_name]
+ else:
+ change_queue = self.constructChangeQueue(queue_name)
+ if queue_name:
+ # If this is a named queue, keep track of it in
+ # case it is referenced again. Otherwise, it will
+ # have a name automatically generated from its
+ # constituent projects.
+ change_queues[queue_name] = change_queue
+ self.pipeline.addQueue(change_queue)
+ self.log.debug("Created queue: %s" % change_queue)
+ change_queue.addProject(project)
+ self.log.debug("Added project %s to queue: %s" %
+ (project, change_queue))
+
+ def getChangeQueue(self, change, event, existing=None):
+ log = get_annotated_logger(self.log, event)
+
+ # Ignore the existing queue, since we can always get the correct queue
+ # from the pipeline. This avoids enqueuing changes in a wrong queue
+ # e.g. during re-configuration.
+ queue = self.pipeline.getQueue(change.project)
+ if queue:
+ return StaticChangeQueueContextManager(queue)
+ else:
+ # There is no existing queue for this change. Create a
+ # dynamic one for this one change's use
+ change_queue = model.ChangeQueue(self.pipeline, dynamic=True)
+ change_queue.addProject(change.project)
+ self.pipeline.addQueue(change_queue)
+ log.debug("Dynamically created queue %s", change_queue)
+ return DynamicChangeQueueContextManager(change_queue)
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index b07b7906a..4d6e6f31c 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -271,45 +271,59 @@ class Repo(object):
return self._createRepoObject(self.local_path, self.env)
@staticmethod
+    def _cleanup_leaked_ref_dirs(local_path, log, messages):  # workaround for pre-2.13 git (see caller)
+        for root, dirs, files in os.walk(
+                os.path.join(local_path, '.git/refs'), topdown=False):  # bottom-up: children before parents
+            if not os.listdir(root) and not root.endswith('.git/refs'):  # empty dir, but never the refs root
+                if log:
+                    log.debug("Cleaning empty ref dir %s", root)
+                else:
+                    messages.append("Cleaning empty ref dir %s" % root)  # collected for the caller to log
+                os.rmdir(root)
+
+ @staticmethod
def _reset(local_path, env, log=None):
messages = []
repo = Repo._createRepoObject(local_path, env)
origin = repo.remotes.origin
- seen = set()
- head = None
- stale_refs = origin.stale_refs
- # Update our local heads to match the remote, and pick one to
- # reset the repo to. We don't delete anything at this point
- # because we want to make sure the repo is in a state stable
- # enough for git to operate.
+
+ # Detach HEAD so we can work with references without interfering
+ # with any active branch. Any remote ref will do as long as it can
+ # be dereferenced to an existing commit.
+ for ref in origin.refs:
+ try:
+ repo.head.reference = ref.commit
+ break
+ except Exception:
+ if log:
+ log.debug("Unable to detach HEAD to %s", ref)
+ else:
+ messages.append("Unable to detach HEAD to %s" % ref)
+ else:
+ raise Exception("Couldn't detach HEAD to any existing commit")
+
+ # Delete local heads that no longer exist on the remote end
+ remote_heads = {r.remote_head for r in origin.refs}
+ for ref in repo.heads:
+ if ref.name not in remote_heads:
+ if log:
+ log.debug("Delete stale local ref %s", ref)
+ else:
+ messages.append("Delete stale local ref %s" % ref)
+ repo.delete_head(ref, force=True)
+
+ # Note: Before git 2.13 deleting a ref foo/bar leaves an empty
+ # directory foo behind that will block creating the reference foo
+ # in the future. As a workaround we must clean up empty directories
+ # in .git/refs.
+ if repo.git.version_info[:2] < (2, 13):
+ Repo._cleanup_leaked_ref_dirs(local_path, log, messages)
+
+ # Update our local heads to match the remote
for ref in origin.refs:
if ref.remote_head == 'HEAD':
continue
- if ref in stale_refs:
- continue
repo.create_head(ref.remote_head, ref, force=True)
- seen.add(ref.remote_head)
- if head is None:
- head = ref.remote_head
- if log:
- log.debug("Reset to %s", head)
- else:
- messages.append("Reset to %s" % head)
- repo.head.reference = head
- for ref in stale_refs:
- if log:
- log.debug("Delete stale ref %s", ref.remote_head)
- else:
- messages.append("Delete stale ref %s" % ref.remote_head)
- # A stale ref means the upstream branch (e.g. foobar) was deleted
- # so we need to delete both our local head (if existing) and the
- # remote tracking head. Both repo.heads and ref.remote_head
- # contain the pure branch name so they can be compared easily.
- for head in repo.heads:
- if head.name == ref.remote_head:
- repo.delete_head(ref.remote_head, force=True)
- break
- git.refs.RemoteReference.delete(repo, ref, force=True)
return messages
def reset(self, zuul_event_id=None, build=None, process_worker=None):
@@ -326,15 +340,6 @@ class Repo(object):
for message in messages:
log.debug(message)
- def prune(self, zuul_event_id=None):
- log = get_annotated_logger(self.log, zuul_event_id)
- repo = self.createRepoObject(zuul_event_id)
- origin = repo.remotes.origin
- stale_refs = origin.stale_refs
- if stale_refs:
- log.debug("Pruning stale refs: %s", stale_refs)
- git.refs.RemoteReference.delete(repo, force=True, *stale_refs)
-
def getBranchHead(self, branch, zuul_event_id=None):
repo = self.createRepoObject(zuul_event_id)
branch_head = repo.heads[branch]
@@ -523,7 +528,7 @@ class Repo(object):
# --tags' is all that is necessary. See
# https://github.com/git/git/blob/master/Documentation/RelNotes/1.9.0.txt#L18-L20
self._git_fetch(repo, 'origin', zuul_event_id)
- self._git_fetch(repo, 'origin', zuul_event_id, tags=True)
+ self._git_fetch(repo, 'origin', zuul_event_id, tags=True, prune=True)
def isUpdateNeeded(self, repo_state, zuul_event_id=None):
repo = self.createRepoObject(zuul_event_id)
@@ -542,6 +547,7 @@ class Repo(object):
def getFiles(self, files, dirs=[], branch=None, commit=None,
zuul_event_id=None):
+ log = get_annotated_logger(self.log, zuul_event_id)
ret = {}
repo = self.createRepoObject(zuul_event_id)
if branch:
@@ -550,6 +556,9 @@ class Repo(object):
tree = repo.commit(commit).tree
for fn in files:
if fn in tree:
+ if tree[fn].type != 'blob':
+ log.warning(
+ "%s: object %s is not a blob", self.local_path, fn)
ret[fn] = tree[fn].data_stream.read().decode('utf8')
else:
ret[fn] = None
diff --git a/zuul/model.py b/zuul/model.py
index a272371fc..a5dd8827d 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -33,6 +33,7 @@ from zuul import change_matcher
from zuul.lib.config import get_default
from zuul.lib.artifacts import get_artifacts_from_result_data
from zuul.lib.logutil import get_annotated_logger
+from zuul.lib.capabilities import capabilities_registry
MERGER_MERGE = 1 # "git merge"
MERGER_MERGE_RESOLVE = 2 # "git merge -s resolve"
@@ -3020,7 +3021,8 @@ class QueueItem(object):
new_job_dict = job.toDict(self.pipeline.tenant)
# Ignore changes to file matchers since they don't affect
# the content of the job.
- for attr in ['files', 'irrelevant_files']:
+ for attr in ['files', 'irrelevant_files',
+ 'source_context', 'description']:
old_job_dict.pop(attr, None)
new_job_dict.pop(attr, None)
if (new_job_dict != old_job_dict):
@@ -4678,23 +4680,21 @@ class Capabilities(object):
facilitate consumers knowing if functionality is available
or not, keep track of distinct capability flags.
"""
- def __init__(self, job_history=False):
- self.job_history = job_history
+ def __init__(self, **kwargs):
+ self._capabilities = kwargs
def __repr__(self):
return '<Capabilities 0x%x %s>' % (id(self), self._renderFlags())
def _renderFlags(self):
- d = self.toDict()
- return " ".join(['{k}={v}'.format(k=k, v=v) for (k, v) in d.items()])
+ return " ".join(['{k}={v}'.format(k=k, v=repr(v))
+ for (k, v) in self._capabilities.items()])
def copy(self):
return Capabilities(**self.toDict())
def toDict(self):
- d = dict()
- d['job_history'] = self.job_history
- return d
+ return self._capabilities
class WebInfo(object):
@@ -4703,7 +4703,10 @@ class WebInfo(object):
def __init__(self, websocket_url=None,
capabilities=None, stats_url=None,
stats_prefix=None, stats_type=None):
- self.capabilities = capabilities or Capabilities()
+ _caps = capabilities
+ if _caps is None:
+ _caps = Capabilities(**capabilities_registry.capabilities)
+ self.capabilities = _caps
self.stats_prefix = stats_prefix
self.stats_type = stats_type
self.stats_url = stats_url
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 1d4e5cd46..cfcc64485 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -301,8 +301,7 @@ class Scheduler(threading.Thread):
'repl': self.start_repl,
'norepl': self.stop_repl,
}
- self._pause = False
- self._exit = False
+ self._hibernate = False
self._stopped = False
self._zuul_app = None
self.executor = None
@@ -698,8 +697,7 @@ class Scheduler(threading.Thread):
def exit(self):
self.log.debug("Prepare to exit")
- self._pause = True
- self._exit = True
+ self._hibernate = True
self.wake_event.set()
self.log.debug("Waiting for exit")
@@ -729,7 +727,8 @@ class Scheduler(threading.Thread):
"current mode is %o" % (key_dir, mode))
return key_dir
- def _save_queue(self):
+ def _save_queue(self) -> None:
+ # TODO JK: Remove when queues in ZK
pickle_file = self._get_queue_pickle_file()
events = []
while not self.trigger_event_queue.empty():
@@ -739,7 +738,8 @@ class Scheduler(threading.Thread):
self.log.debug("Saving queue")
pickle.dump(events, open(pickle_file, 'wb'))
- def _load_queue(self):
+ def _load_queue(self) -> None:
+ # TODO JK: Remove when queues in ZK
pickle_file = self._get_queue_pickle_file()
if os.path.exists(pickle_file):
self.log.debug("Loading queue")
@@ -750,13 +750,19 @@ class Scheduler(threading.Thread):
else:
self.log.debug("No queue file found")
- def _delete_queue(self):
+ def _delete_queue(self) -> None:
+ # TODO JK: Remove when queues in ZK
pickle_file = self._get_queue_pickle_file()
if os.path.exists(pickle_file):
self.log.debug("Deleting saved queue")
os.unlink(pickle_file)
- def resume(self):
+ def wakeUp(self) -> None:
+ """
+ Wakes up scheduler by loading pickled queue.
+
+ TODO JK: Remove when queues in ZK
+ """
try:
self._load_queue()
except Exception:
@@ -768,8 +774,9 @@ class Scheduler(threading.Thread):
self.log.debug("Resuming queue processing")
self.wake_event.set()
- def _doPauseEvent(self):
- if self._exit:
+ def _doHibernate(self) -> None:
+ # TODO JK: Remove when queues in ZK
+ if self._hibernate:
self.log.debug("Exiting")
self._save_queue()
os._exit(0)
@@ -1215,13 +1222,13 @@ class Scheduler(threading.Thread):
not self._stopped):
self.process_result_queue()
- if not self._pause:
+ if not self._hibernate:
while (not self.trigger_event_queue.empty() and
not self._stopped):
self.process_event_queue()
- if self._pause and self._areAllBuildsComplete():
- self._doPauseEvent()
+ if self._hibernate and self._areAllBuildsComplete():
+ self._doHibernate()
for tenant in self.abide.tenants.values():
for pipeline in tenant.layout.pipelines.values():
@@ -1275,12 +1282,17 @@ class Scheduler(threading.Thread):
if ((event.branch_updated and
hasattr(change, 'files') and
change.updatesConfig(tenant)) or
- event.branch_created or
(event.branch_deleted and
self.abide.hasUnparsedBranchCache(event.project_name,
event.branch))):
reconfigure_tenant = True
+ # The branch_created attribute is also true when a tag is
+ # created. Since we load config only from branches, only trigger
+ # a tenant reconfiguration if the branch is set as well.
+ if event.branch_created and event.branch:
+ reconfigure_tenant = True
+
# If the driver knows the branch but we don't have a config, we
# also need to reconfigure. This happens if a GitHub branch
# was just configured as protected without a push in between.
@@ -1625,12 +1637,10 @@ class Scheduler(threading.Thread):
data['zuul_version'] = self.zuul_version
websocket_url = get_default(self.config, 'web', 'websocket_url', None)
- if self._pause:
- ret = 'Queue only mode: preparing to '
- if self._exit:
- ret += 'exit'
- ret += ', queue length: %s' % self.trigger_event_queue.qsize()
- data['message'] = ret
+ if self._hibernate:
+ data['message'] = 'Queue only mode: preparing to hibernate,' \
+ ' queue length: %s'\
+ % self.trigger_event_queue.qsize()
data['trigger_event_queue'] = {}
data['trigger_event_queue']['length'] = \
diff --git a/zuul/web/__init__.py b/zuul/web/__init__.py
index 6c3b6a933..d6df14c58 100755
--- a/zuul/web/__init__.py
+++ b/zuul/web/__init__.py
@@ -1069,17 +1069,13 @@ class StreamManager(object):
class ZuulWeb(object):
log = logging.getLogger("zuul.web.ZuulWeb")
- def __init__(self, listen_address, listen_port,
- gear_server, gear_port,
- ssl_key=None, ssl_cert=None, ssl_ca=None,
- static_cache_expiry=3600,
- connections=None,
- info=None,
- static_path=None,
- zk_hosts=None,
- authenticators=None,
- command_socket=None,
- ):
+ def __init__(self, listen_address, listen_port, gear_server,
+ gear_port, ssl_key=None, ssl_cert=None, ssl_ca=None,
+ static_cache_expiry=3600, connections=None,
+ info=None, static_path=None, zk_hosts=None,
+ zk_timeout=None, zk_tls_cert=None, zk_tls_key=None,
+ zk_tls_ca=None, authenticators=None,
+ command_socket=None):
self.start_time = time.time()
self.listen_address = listen_address
self.listen_port = listen_port
@@ -1095,7 +1091,10 @@ class ZuulWeb(object):
client_id='Zuul Web Server')
self.zk = zuul.zk.ZooKeeper(enable_cache=True)
if zk_hosts:
- self.zk.connect(hosts=zk_hosts, read_only=True)
+ self.zk.connect(hosts=zk_hosts, read_only=True,
+ timeout=zk_timeout, tls_cert=zk_tls_cert,
+ tls_key=zk_tls_key, tls_ca=zk_tls_ca)
+
self.connections = connections
self.authenticators = authenticators
self.stream_manager = StreamManager()
@@ -1249,6 +1248,9 @@ class ZuulWeb(object):
self.command_socket.stop()
self.command_thread.join()
+ def join(self):
+ self.command_thread.join()
+
def runCommand(self):
while self._command_running:
try:
diff --git a/zuul/zk.py b/zuul/zk.py
index 371a365dd..980249526 100644
--- a/zuul/zk.py
+++ b/zuul/zk.py
@@ -115,7 +115,8 @@ class ZooKeeper(object):
self.log.warning("Retrying zookeeper connection")
self._last_retry_log = now
- def connect(self, hosts, read_only=False, timeout=10.0):
+ def connect(self, hosts, read_only=False, timeout=10.0,
+ tls_cert=None, tls_key=None, tls_ca=None):
'''
Establish a connection with ZooKeeper cluster.
@@ -127,10 +128,22 @@ class ZooKeeper(object):
:param bool read_only: If True, establishes a read-only connection.
:param float timeout: The ZooKeeper session timeout, in
seconds (default: 10.0).
+ :param str tls_cert: Path to TLS cert
+ :param str tls_key: Path to TLS key
+ :param str tls_ca: Path to TLS CA cert
'''
+
if self.client is None:
- self.client = KazooClient(hosts=hosts, read_only=read_only,
- timeout=timeout)
+ args = dict(hosts=hosts,
+ read_only=read_only,
+ timeout=timeout,
+ )
+ if tls_key:
+ args['use_ssl'] = True
+ args['keyfile'] = tls_key
+ args['certfile'] = tls_cert
+ args['ca'] = tls_ca
+ self.client = KazooClient(**args)
self.client.add_listener(self._connection_listener)
# Manually retry initial connection attempt
while True: