-rw-r--r--  .zuul.yaml  89
-rw-r--r--  doc/source/developer/model-changelog.rst  7
-rw-r--r--  doc/source/examples/docker-compose.yaml  10
-rw-r--r--  doc/source/examples/etc_nodepool/nodepool.yaml  4
-rw-r--r--  doc/source/examples/node-Dockerfile  2
-rw-r--r--  doc/source/examples/zuul-config/zuul.d/jobs.yaml  4
-rw-r--r--  doc/source/examples/zuul-config/zuul.d/jobs2.yaml  4
-rw-r--r--  doc/source/job-content.rst  62
-rw-r--r--  playbooks/zuul-stream/fixtures/test-stream.yaml  15
-rw-r--r--  playbooks/zuul-stream/validate.yaml  5
-rw-r--r--  releasenotes/notes/elasticsearch-filter-zuul-returned-vars-4a883813481cc313.yaml  6
-rw-r--r--  releasenotes/notes/fix-prune-database-a4873bd4dead7b5f.yaml  16
-rw-r--r--  releasenotes/notes/handle-existing-commits-with-cherry-pick-e1a979c2e7ed1a78.yaml  14
-rw-r--r--  releasenotes/notes/nodepool-slot-2061128253e50580.yaml  7
-rw-r--r--  releasenotes/notes/submit-requirements-1d61f88e54be1fde.yaml  16
-rw-r--r--  tests/base.py  63
-rw-r--r--  tests/fakegithub.py  2
-rw-r--r--  tests/fakegitlab.py  24
-rw-r--r--  tests/fixtures/config/elasticsearch-driver/git/common-config/playbooks/test.yaml  2
-rw-r--r--  tests/fixtures/config/in-repo-dir/git/org_project/zuul.d/project.yaml  5
-rw-r--r--  tests/fixtures/config/in-repo-dir/git/org_project2/.zuul.yaml  5
-rw-r--r--  tests/fixtures/config/in-repo-dir/git/org_project3/.extra-3.yaml  2
-rw-r--r--  tests/fixtures/config/in-repo-dir/git/org_project3/.zuul.yaml  11
-rw-r--r--  tests/fixtures/config/in-repo-dir/git/org_project3/README  1
-rw-r--r--  tests/fixtures/config/in-repo-dir/git/org_project3/extra-3.d/jobs-private.yaml  2
-rw-r--r--  tests/fixtures/config/in-repo-dir/main.yaml  4
-rw-r--r--  tests/fixtures/layouts/deps-by-topic.yaml  13
-rw-r--r--  tests/unit/test_circular_dependencies.py  157
-rw-r--r--  tests/unit/test_client.py  294
-rw-r--r--  tests/unit/test_gerrit.py  99
-rw-r--r--  tests/unit/test_github_driver.py  42
-rw-r--r--  tests/unit/test_gitlab_driver.py  61
-rw-r--r--  tests/unit/test_model_upgrade.py  27
-rw-r--r--  tests/unit/test_scheduler.py  79
-rw-r--r--  tests/unit/test_v3.py  36
-rw-r--r--  web/src/containers/FilterToolbar.jsx  17
-rw-r--r--  web/src/pages/Builds.jsx  23
-rw-r--r--  web/src/pages/Buildsets.jsx  23
-rw-r--r--  web/src/pages/Labels.jsx  57
-rw-r--r--  web/src/pages/Nodes.jsx  143
-rw-r--r--  web/src/pages/Projects.jsx  93
-rw-r--r--  zuul/ansible/base/callback/zuul_stream.py  5
-rwxr-xr-x  zuul/cmd/client.py  44
-rw-r--r--  zuul/configloader.py  112
-rw-r--r--  zuul/driver/elasticsearch/reporter.py  4
-rw-r--r--  zuul/driver/gerrit/gerritconnection.py  52
-rw-r--r--  zuul/driver/gerrit/gerritmodel.py  4
-rw-r--r--  zuul/driver/gerrit/gerritsource.py  11
-rw-r--r--  zuul/driver/github/githubconnection.py  22
-rw-r--r--  zuul/driver/github/githubreporter.py  13
-rw-r--r--  zuul/driver/gitlab/gitlabconnection.py  44
-rw-r--r--  zuul/driver/mqtt/mqttconnection.py  15
-rw-r--r--  zuul/driver/sql/sqlconnection.py  39
-rw-r--r--  zuul/executor/server.py  5
-rw-r--r--  zuul/manager/__init__.py  92
-rw-r--r--  zuul/merger/merger.py  28
-rw-r--r--  zuul/model.py  69
-rw-r--r--  zuul/model_api.py  2
-rw-r--r--  zuul/scheduler.py  19
-rw-r--r--  zuul/zk/job_request_queue.py  2
60 files changed, 1668 insertions, 460 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 420ac11ad..a968f7100 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -195,25 +195,37 @@
# Image building jobs
- secret:
- name: zuul-dockerhub
+ name: zuul-registry-credentials
data:
- username: zuulzuul
- password: !encrypted/pkcs1-oaep
- - DFlbrDM5eUMptMGIVMXV1g455xOJLi92UYF08Z2/JlIGu3t6v052o9FKlVyj1ZmpXs5+2
- JTa5jHkLTvTsYs9fCaNcQc2nmViCyWNlbOMzjB17uiZOaYFNs1sMqZcUZbGEz7Y8ds6Qq
- NBXI10jWFPTah4QxUuBvUbT3vmjnUToCzexl5ZGhKgijcnROWfUsnlCdugpgoNIcPsUki
- zty5FotDihnrC8n8vIomVK6EClY38ty97pLrADzFDd+Cos/OUlvi2xooUhzx8Bn020rJA
- lqEU5v8LGXp5QkHx0MSDx6JY6KppJ/4p/yM+4By6l+A20zdcimxmgiNc9rMWPwDj7xsao
- m7NAZWmWqOO0Xkhgt6WOfugwgt9X46sgs2+yDEfbnI5ok8uRbAB/4FWj/KdpyXwhcf+O2
- wEfhxLwDbAoGONQPjb4YcZmCXtmR7Qe5t+n2jyczWXvrbaBDUQP5a+YtVNN/xhmQ7D740
- POlxv7bLxJAixzqaQ3d8Rz9ZEv6zzRuhWph32UQtZ1JxSNww+EvmXm2eEi2Q2z6pT1Cx/
- j2OrFyA2GL/UJOVb15VHKF6bgHPHWJtpjPFhqdcvBhVute4BWB+KPcWH+y+apHN1enK3H
- tNJO9iqm34nKwSuj5ExmFw50LtwR5/9FyRuRPq/vBL+8y82v8FDmeYsBeobn5M=
+ quay.io:
+ username: zuul-ci+opendevzuul
+ password: !encrypted/pkcs1-oaep
+ - B5RM116kdo4uTDHChDVWLbRUvXZHXkndzi9sZVmZ/8EjQRKhtsNfVWWPinr7cbXiN6NjA
+ ja85RrAMwYic8Y2f8cTRSowitPDmvAs8Av/zZ6PnFap6pGb1vQFuPYYOqEkkeqrQoY9vO
+ h2PV3Z2A+O48mzDt0CVhI6E8AQdqrMO7R0pO1plb5q9PFTHUlwgUdIbUMkVpndVMER8Ez
+ IdWs2bcVUC5hChUKFcSX2Jr8peOwQvLnzX5nGRAYATrp5tV/xsC8R/WkOVvKP0ORLJlf8
+ T/yZRXm7yw9LdizCsf+3jkzw726YZT+GqavPnygJvKeu+WVMtBs69TPhTdpq4B5WCnQko
+ Xq+g2WZkqdfeQRBp2BqMsk1wZ+fMnTqKb8iJRO0tjYmFJBq0xRRQHHjwLDd4clM1KBMqT
+ 17ss4QvTY61ZbbSM33M3FgibBPe96G7vCuTLiUnrmcR8i5M1H+XHPYMSSbCn3DgY5IBl2
+ Lr9ism4velFlzXQ9r71VM5v+JCSbcjrMAn5GKFZ5f9MeqWnATqYUO+xwEa/wb+sgPzd/v
+ VNlB74K3tg7wP1abGM7LtQkVKkWuy44sXayt2PcikE4pDO6EiODOxSv5a5Uxc6ef/cmfl
+ ADZ08hlob4CFNjL1m5lZSZNYDcsgwYuYXoP2a5kgykerEV+v3q0epjExCCUrEI=
+ api_token: !encrypted/pkcs1-oaep
+ - HTgQ6onrqJCCwjwT+TGjgiXT6qgwJMbWYUXlt5mz570RKNN4Ptsa6oqu4zpk+paAIwain
+ Olw3n4InshNAWIvTSro6zwYurmiiKCbxcocaxLzIKM+zbgIR7haKm+Crqei4tanWNpXHq
+ ULdu4muJojxu6OdYNhftPY3NLdFFuDA5IF2zv2f4gZSthvmQ9NzvEqPKFDV2yzNa6G4V+
+ jicYR3X7f0TjW9QF9p9CuSMvZrOCxp98zRelT8EJBKsb+38JVapvJgbAqAgJhAaGgJYkP
+ 1W7iqL+eNLMcRCnkoBEnOwZe66WO5gZagqWH25oRhjFnFM/qqMeYEG/AhAUae7vTHd9VK
+ yHSndEdrdCNpdzBsJQFGS7lxJMUl8ELL61qoojZSW91bGRIziedYEbDuPEzZUkoosaf6r
+ 50jvdRYOsmYnmr3Q5/T0QxW00qLL6FkPCRHg8wI2EXDPA/X5+vTlEYMhJxEJZ1+unHk9t
+ PPebXHCZ2B0VA+x3Khnt5BOJt/ewxfbfVu0CVgjSgrAUaSHWY3DLdvbuA9lwjafybkfxC
+ vIEhQz8AxAgWsdWpFOr9uPCB+5C+ma4jF15k/RUWoODHhvYZEoSNDOz4BRGrK+kHczr3F
+ P9x7xzzPhVCdbspsf+sV90xpUI0U4vCeVpi+3Ha1zZZR0JAC1SIWXUnJxB2EkM=
- job:
name: zuul-build-image
- parent: opendev-build-docker-image
- description: Build Docker images.
+ parent: opendev-build-container-image
+ description: Build container images.
allowed-projects: zuul/zuul
timeout: 2700 # 45 minutes
requires:
@@ -221,58 +233,67 @@
- python-base-3.11-bullseye-container-image
provides: zuul-container-image
vars: &zuul_image_vars
- docker_images:
+ promote_container_image_method: intermediate-registry
+ promote_container_image_job: zuul-upload-image
+ container_command: docker
+ container_images:
- context: .
- repository: zuul/zuul
+ registry: quay.io
+ repository: quay.io/zuul-ci/zuul
target: zuul
tags:
# If zuul.tag is defined: [ '3', '3.19', '3.19.0' ]. Only works for 3-component tags.
# Otherwise: ['latest']
&imagetag "{{ zuul.tag is defined | ternary([zuul.get('tag', '').split('.')[0], '.'.join(zuul.get('tag', '').split('.')[:2]), zuul.get('tag', '')], ['latest']) }}"
- context: .
- repository: zuul/zuul-executor
+ registry: quay.io
+ repository: quay.io/zuul-ci/zuul-executor
target: zuul-executor
tags: *imagetag
- context: .
- repository: zuul/zuul-fingergw
+ registry: quay.io
+ repository: quay.io/zuul-ci/zuul-fingergw
target: zuul-fingergw
tags: *imagetag
- context: .
- repository: zuul/zuul-merger
+ registry: quay.io
+ repository: quay.io/zuul-ci/zuul-merger
target: zuul-merger
tags: *imagetag
- context: .
- repository: zuul/zuul-scheduler
+ registry: quay.io
+ repository: quay.io/zuul-ci/zuul-scheduler
target: zuul-scheduler
tags: *imagetag
- context: .
- repository: zuul/zuul-web
+ registry: quay.io
+ repository: quay.io/zuul-ci/zuul-web
target: zuul-web
tags: *imagetag
- job:
name: zuul-upload-image
- parent: opendev-upload-docker-image
- description: Build Docker images and upload to Docker Hub.
+ parent: opendev-upload-container-image
+ description: Build container images and upload.
allowed-projects: zuul/zuul
requires:
- python-builder-3.11-bullseye-container-image
- python-base-3.11-bullseye-container-image
provides: zuul-container-image
secrets:
- name: docker_credentials
- secret: zuul-dockerhub
+ name: container_registry_credentials
+ secret: zuul-registry-credentials
pass-to-parent: true
vars: *zuul_image_vars
- job:
name: zuul-promote-image
- parent: opendev-promote-docker-image
- description: Promote previously uploaded Docker images.
+ parent: opendev-promote-container-image
+ description: Promote previously uploaded container images.
allowed-projects: zuul/zuul
secrets:
- name: docker_credentials
- secret: zuul-dockerhub
+ name: container_registry_credentials
+ secret: zuul-registry-credentials
pass-to-parent: true
nodeset:
nodes: []
@@ -370,11 +391,11 @@
jobs:
- zuul-release-python
- zuul-publish-nox-docs
- - upload-docker-image:
+ - upload-container-image:
secrets:
- name: docker_credentials
- secret: zuul-dockerhub
+ name: container_registry_credentials
+ secret: zuul-registry-credentials
pass-to-parent: true
vars:
<<: *zuul_image_vars
- upload_docker_image_promote: false
+ upload_container_image_promote: false
diff --git a/doc/source/developer/model-changelog.rst b/doc/source/developer/model-changelog.rst
index b80979362..f78b3f0a0 100644
--- a/doc/source/developer/model-changelog.rst
+++ b/doc/source/developer/model-changelog.rst
@@ -112,3 +112,10 @@ Version 12
:Prior Zuul version: 8.0.1
:Description: Adds job_versions and build_versions to BuildSet.
Affects schedulers.
+
+Version 13
+----------
+:Prior Zuul version: 8.2.0
+:Description: Stores only the necessary event info as part of a queue item
+ instead of the full trigger event.
+ Affects schedulers.
diff --git a/doc/source/examples/docker-compose.yaml b/doc/source/examples/docker-compose.yaml
index 43c0bec61..0eb05835b 100644
--- a/doc/source/examples/docker-compose.yaml
+++ b/doc/source/examples/docker-compose.yaml
@@ -11,7 +11,7 @@ services:
networks:
- zuul
gerritconfig:
- image: docker.io/zuul/zuul-executor
+ image: quay.io/zuul-ci/zuul-executor
environment:
- http_proxy
- https_proxy
@@ -66,7 +66,7 @@ services:
zuul-scheduler -f'
# FIXME: The scheduler has no ansible anymore so use the executor image.
# This needs to be changed such that ansible is not required for startup.
- image: docker.io/zuul/zuul-scheduler
+ image: quay.io/zuul-ci/zuul-scheduler
volumes:
- "${ZUUL_TUTORIAL_CONFIG:-./etc_zuul/}:/etc/zuul/:z"
- "./playbooks/:/var/playbooks/:z"
@@ -83,7 +83,7 @@ services:
- mysql
ports:
- "9000:9000"
- image: docker.io/zuul/zuul-web
+ image: quay.io/zuul-ci/zuul-web
environment:
ZUUL_MYSQL_PASSWORD: secret
volumes:
@@ -101,7 +101,7 @@ services:
- ZUUL_MYSQL_PASSWORD=secret
depends_on:
- scheduler
- image: docker.io/zuul/zuul-executor
+ image: quay.io/zuul-ci/zuul-executor
volumes:
- "${ZUUL_TUTORIAL_CONFIG:-./etc_zuul/}:/etc/zuul/:z"
- "./playbooks/:/var/playbooks/:z"
@@ -126,7 +126,7 @@ services:
launcher:
depends_on:
- zk
- image: docker.io/zuul/nodepool-launcher
+ image: quay.io/zuul-ci/nodepool-launcher
volumes:
- "./playbooks/:/var/playbooks/:z"
- "./etc_nodepool/:/etc/nodepool/:z"
diff --git a/doc/source/examples/etc_nodepool/nodepool.yaml b/doc/source/examples/etc_nodepool/nodepool.yaml
index 1c1830635..105b0ef54 100644
--- a/doc/source/examples/etc_nodepool/nodepool.yaml
+++ b/doc/source/examples/etc_nodepool/nodepool.yaml
@@ -7,7 +7,7 @@ zookeeper-tls:
ca: /var/certs/certs/cacert.pem
labels:
- - name: ubuntu-focal
+ - name: ubuntu-jammy
providers:
- name: static-vms
@@ -16,7 +16,7 @@ providers:
- name: main
nodes:
- name: node
- labels: ubuntu-focal
+ labels: ubuntu-jammy
host-key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOgHJYejINIKzUiuSJ2MN8uPc+dfFrZ9JH1hLWS8gI+g"
python-path: /usr/bin/python3
username: root
diff --git a/doc/source/examples/node-Dockerfile b/doc/source/examples/node-Dockerfile
index ff74aa592..b588bcf2c 100644
--- a/doc/source/examples/node-Dockerfile
+++ b/doc/source/examples/node-Dockerfile
@@ -1,4 +1,4 @@
-FROM docker.io/ubuntu:20.04
+FROM docker.io/ubuntu:22.04
RUN apt-get update \
&& DEBIAN_FRONTEND="noninteractive" apt-get -y install \
diff --git a/doc/source/examples/zuul-config/zuul.d/jobs.yaml b/doc/source/examples/zuul-config/zuul.d/jobs.yaml
index 8ad979e46..bb9822f48 100644
--- a/doc/source/examples/zuul-config/zuul.d/jobs.yaml
+++ b/doc/source/examples/zuul-config/zuul.d/jobs.yaml
@@ -3,5 +3,5 @@
parent: null
nodeset:
nodes:
- - name: ubuntu-focal
- label: ubuntu-focal
+ - name: ubuntu-jammy
+ label: ubuntu-jammy
diff --git a/doc/source/examples/zuul-config/zuul.d/jobs2.yaml b/doc/source/examples/zuul-config/zuul.d/jobs2.yaml
index a6ed1a633..c7b4a6878 100644
--- a/doc/source/examples/zuul-config/zuul.d/jobs2.yaml
+++ b/doc/source/examples/zuul-config/zuul.d/jobs2.yaml
@@ -18,5 +18,5 @@
timeout: 1800
nodeset:
nodes:
- - name: ubuntu-focal
- label: ubuntu-focal
+ - name: ubuntu-jammy
+ label: ubuntu-jammy
diff --git a/doc/source/job-content.rst b/doc/source/job-content.rst
index d6bb07683..643632d5b 100644
--- a/doc/source/job-content.rst
+++ b/doc/source/job-content.rst
@@ -669,6 +669,68 @@ of item.
- shell: echo example
when: zuul_success | bool
+.. var:: nodepool
+
+ Information about each host from Nodepool is supplied in the
+ `nodepool` host variable. Availability of values varies based on
+ the node and the driver that supplied it. Values may be ``null``
+ if they are not applicable.
+
+ .. var:: label
+
+ The nodepool label of this node.
+
+ .. var:: az
+
+ The availability zone in which this node was placed.
+
+ .. var:: cloud
+
+ The name of the cloud in which this node was created.
+
+ .. var:: provider
+
+ The name of the nodepool provider of this node.
+
+ .. var:: region
+
+ The name of the nodepool provider's region.
+
+ .. var:: host_id
+
+ The cloud's host identification for this node's hypervisor.
+
+ .. var:: external_id
+
+ The cloud's identifier for this node.
+
+ .. var:: slot
+
+ If the node supports running multiple jobs, a unique numeric ID
+ for the subdivision of the node assigned to this job.
+ This may be used to avoid build directory collisions.
+
+ .. var:: interface_ip
+
+ The best IP address to use to contact the node as determined by
+ the cloud provider and nodepool.
+
+ .. var:: public_ipv4
+
+ A public IPv4 address of the node.
+
+ .. var:: private_ipv4
+
+ A private IPv4 address of the node.
+
+ .. var:: public_ipv6
+
+ A public IPv6 address of the node.
+
+ .. var:: private_ipv6
+
+ A private IPv6 address of the node.
+
Change Items
~~~~~~~~~~~~
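
The nodepool host variables documented above can be consumed directly
from a job playbook. A minimal sketch (the label, provider, and address
values are simply whatever Nodepool supplied; nothing here is specific
to this change):

    - hosts: all
      tasks:
        # Report where this node came from and how to reach it.
        - name: Show node provenance
          debug:
            msg: >-
              {{ nodepool.label }} via {{ nodepool.provider }}
              ({{ nodepool.cloud }}/{{ nodepool.region }})
              at {{ nodepool.interface_ip }}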
diff --git a/playbooks/zuul-stream/fixtures/test-stream.yaml b/playbooks/zuul-stream/fixtures/test-stream.yaml
index 488f8cb2f..49ceb092b 100644
--- a/playbooks/zuul-stream/fixtures/test-stream.yaml
+++ b/playbooks/zuul-stream/fixtures/test-stream.yaml
@@ -1,3 +1,16 @@
+# NOTE: We run this before starting the log streaming to validate that
+# if we set zuul_console_disabled, we don't try to connect at all. If
+# there is a log streamer running when we run this test, then we have
+# no way to tell whether the connection step was actually skipped.
+- name: Run command to show skipping works without zuul_console running
+ vars:
+ zuul_console_disabled: true
+ hosts: node
+ tasks:
+ - name: Run quiet command
+ command: echo 'This command should not stream'
+ when: new_console | default(false)
+
- name: Start zuul stream daemon
hosts: node
tasks:
@@ -11,7 +24,7 @@
port: 19887
when: new_console | default(false)
-- name: Run command to show skipping works
+- name: Run command to show skipping works with zuul_console running
vars:
zuul_console_disabled: true
hosts: node
diff --git a/playbooks/zuul-stream/validate.yaml b/playbooks/zuul-stream/validate.yaml
index 81c613406..c7069f335 100644
--- a/playbooks/zuul-stream/validate.yaml
+++ b/playbooks/zuul-stream/validate.yaml
@@ -27,3 +27,8 @@
- name: Validate output - binary data
shell: |
egrep "^.*\| {{ item.node }} \| \\\\x80abc" {{ item.filename }}
+
+- name: Validate output - no waiting on logger
+ shell: |
+ egrep -v "Waiting on logger" {{ item.filename }}
+ egrep -v "Log Stream did not terminate" {{ item.filename }}
diff --git a/releasenotes/notes/elasticsearch-filter-zuul-returned-vars-4a883813481cc313.yaml b/releasenotes/notes/elasticsearch-filter-zuul-returned-vars-4a883813481cc313.yaml
new file mode 100644
index 000000000..70b388b16
--- /dev/null
+++ b/releasenotes/notes/elasticsearch-filter-zuul-returned-vars-4a883813481cc313.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The Elasticsearch reporter now filters `zuul` data from the job returned vars.
+ The job returned vars under the `zuul` key are meant for Zuul and may include
+ large amounts of data such as file comments.
diff --git a/releasenotes/notes/fix-prune-database-a4873bd4dead7b5f.yaml b/releasenotes/notes/fix-prune-database-a4873bd4dead7b5f.yaml
new file mode 100644
index 000000000..6e036a754
--- /dev/null
+++ b/releasenotes/notes/fix-prune-database-a4873bd4dead7b5f.yaml
@@ -0,0 +1,16 @@
+---
+fixes:
+ - |
+ The `zuul-admin prune-database` command did not completely delete
+ expected data from the database. It may not have deleted all of
+ the buildsets older than the specified cutoff time, and it may
+ have left orphaned data in ancillary tables. This has been
+ corrected, and it should now work as expected. Additionally, a
+ `--batch-size` argument has been added so that it may delete data
+ in multiple transactions, which makes for smoother operation when
+ run while Zuul is in service.
+
+ Users who have previously run the command may need to manually
+ delete rows from the `zuul_build`, `zuul_build_event`,
+ `zuul_artifact`, and `zuul_provides` tables which do not have
+ corresponding entries in the `zuul_buildset` table.
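
As a usage sketch, an operator could prune in batches with something
like the following (the config path, cutoff, and batch size are
illustrative assumptions, not recommendations):

    zuul-admin -c /etc/zuul/zuul.conf prune-database --older-than 90d --batch-size 10000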
diff --git a/releasenotes/notes/handle-existing-commits-with-cherry-pick-e1a979c2e7ed1a78.yaml b/releasenotes/notes/handle-existing-commits-with-cherry-pick-e1a979c2e7ed1a78.yaml
new file mode 100644
index 000000000..dd5c502d2
--- /dev/null
+++ b/releasenotes/notes/handle-existing-commits-with-cherry-pick-e1a979c2e7ed1a78.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+ - |
+ The `cherry-pick` merge mode will now silently skip commits that have
+ already been applied to the tree when cherry-picking, instead of failing
+ with an error.
+
+ The exception to this is if the source of the cherry-pick is an empty
+ commit, in which case it is always kept.
+
+ Skipping commits that have already been applied is important in a pipeline
+ triggered by the Gerrit `change-merged` event (like the `deploy` pipeline),
+ since the scheduler would previously try to cherry-pick the change on top
+ of the commit that just merged and fail.
diff --git a/releasenotes/notes/nodepool-slot-2061128253e50580.yaml b/releasenotes/notes/nodepool-slot-2061128253e50580.yaml
new file mode 100644
index 000000000..c7ba3e1dc
--- /dev/null
+++ b/releasenotes/notes/nodepool-slot-2061128253e50580.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ The :var:`nodepool.slot` variable has been added to host vars.
+ This is supplied by the nodepool static and metastatic drivers
+ starting with version 8.0.0. It may be used to avoid build
+ directory collisions on nodes that run more than one job.
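
A minimal sketch of how a job might use this variable to separate
concurrent builds on the same node (the path is an illustrative
assumption; the default covers drivers that do not supply a slot):

    - hosts: all
      tasks:
        # Keep concurrent builds on the same node from colliding.
        - name: Create a per-slot build directory
          file:
            path: "/var/tmp/build-{{ nodepool.slot | default(0) }}"
            state: directory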
diff --git a/releasenotes/notes/submit-requirements-1d61f88e54be1fde.yaml b/releasenotes/notes/submit-requirements-1d61f88e54be1fde.yaml
new file mode 100644
index 000000000..14f037156
--- /dev/null
+++ b/releasenotes/notes/submit-requirements-1d61f88e54be1fde.yaml
@@ -0,0 +1,16 @@
+---
+fixes:
+ - |
+ Zuul will now attempt to honor Gerrit "submit requirements" when
+ determining whether to enqueue a change into a dependent (i.e.,
+ "gate") pipeline. Zuul previously honored only Gerrit's older
+ "submit records" feature. The new checks will avoid enqueing
+ changes in "gate" pipelines in the cases where Zuul can
+ unambiguously determine that there is no possibility of merging,
+ but some non-mergable changes may still be enqueued if Zuul can
+ not be certain whether a rule should apply or be disregarded (in
+ these cases, Gerrit will fail to merge the change and Zuul will
+ report the buildset as a MERGE_FAILURE).
+
+ This requires Gerrit version 3.5.0 or later, and Zuul to be
+ configured with HTTP access for Gerrit.
diff --git a/tests/base.py b/tests/base.py
index dcd316fab..c031497a2 100644
--- a/tests/base.py
+++ b/tests/base.py
@@ -384,7 +384,7 @@ class FakeGerritChange(object):
def __init__(self, gerrit, number, project, branch, subject,
status='NEW', upstream_root=None, files={},
parent=None, merge_parents=None, merge_files=None,
- topic=None):
+ topic=None, empty=False):
self.gerrit = gerrit
self.source = gerrit
self.reported = 0
@@ -403,6 +403,7 @@ class FakeGerritChange(object):
self.comments = []
self.checks = {}
self.checks_history = []
+ self.submit_requirements = []
self.data = {
'branch': branch,
'comments': self.comments,
@@ -429,7 +430,7 @@ class FakeGerritChange(object):
self.addMergePatchset(parents=merge_parents,
merge_files=merge_files)
else:
- self.addPatchset(files=files, parent=parent)
+ self.addPatchset(files=files, parent=parent, empty=empty)
if merge_parents:
self.data['parents'] = merge_parents
elif parent:
@@ -503,9 +504,11 @@ class FakeGerritChange(object):
repo.heads['master'].checkout()
return r
- def addPatchset(self, files=None, large=False, parent=None):
+ def addPatchset(self, files=None, large=False, parent=None, empty=False):
self.latest_patchset += 1
- if not files:
+ if empty:
+ files = {}
+ elif not files:
fn = '%s-%s' % (self.branch.replace('/', '_'), self.number)
data = ("test %s %s %s\n" %
(self.branch, self.number, self.latest_patchset))
@@ -786,6 +789,12 @@ class FakeGerritChange(object):
return [{'status': 'NOT_READY',
'labels': labels}]
+ def getSubmitRequirements(self):
+ return self.submit_requirements
+
+ def setSubmitRequirements(self, reqs):
+ self.submit_requirements = reqs
+
def setDependsOn(self, other, patchset):
self.depends_on_change = other
self.depends_on_patchset = patchset
@@ -892,6 +901,7 @@ class FakeGerritChange(object):
data['parents'] = self.data['parents']
if 'topic' in self.data:
data['topic'] = self.data['topic']
+ data['submit_requirements'] = self.getSubmitRequirements()
return json.loads(json.dumps(data))
def queryRevisionHTTP(self, revision):
@@ -1330,7 +1340,7 @@ class FakeGerritConnection(gerritconnection.GerritConnection):
def addFakeChange(self, project, branch, subject, status='NEW',
files=None, parent=None, merge_parents=None,
- merge_files=None, topic=None):
+ merge_files=None, topic=None, empty=False):
"""Add a change to the fake Gerrit."""
self.change_number += 1
c = FakeGerritChange(self, self.change_number, project, branch,
@@ -1338,7 +1348,7 @@ class FakeGerritConnection(gerritconnection.GerritConnection):
status=status, files=files, parent=parent,
merge_parents=merge_parents,
merge_files=merge_files,
- topic=topic)
+ topic=topic, empty=empty)
self.changes[self.change_number] = c
return c
@@ -1494,8 +1504,9 @@ class FakeGerritConnection(gerritconnection.GerritConnection):
msg = msg[1:-1]
l = [queryMethod(change) for change in self.changes.values()
if msg in change.data['commitMessage']]
- elif query.startswith("status:"):
+ else:
cut_off_time = 0
+ l = list(self.changes.values())
parts = query.split(" ")
for part in parts:
if part.startswith("-age"):
@@ -1503,17 +1514,18 @@ class FakeGerritConnection(gerritconnection.GerritConnection):
cut_off_time = (
datetime.datetime.now().timestamp() - float(age[:-1])
)
- l = [
- queryMethod(change) for change in self.changes.values()
- if change.data["lastUpdated"] >= cut_off_time
- ]
- elif query.startswith('topic:'):
- topic = query[len('topic:'):].strip()
- l = [queryMethod(change) for change in self.changes.values()
- if topic in change.data.get('topic', '')]
- else:
- # Query all open changes
- l = [queryMethod(change) for change in self.changes.values()]
+ l = [
+ change for change in l
+ if change.data["lastUpdated"] >= cut_off_time
+ ]
+ if part.startswith('topic:'):
+ topic = part[len('topic:'):].strip()
+ l = [
+ change for change in l
+ if 'topic' in change.data
+ and topic in change.data['topic']
+ ]
+ l = [queryMethod(change) for change in l]
return l
def simpleQuerySSH(self, query, event=None):
@@ -2118,6 +2130,21 @@ class FakeGitlabConnection(gitlabconnection.GitlabConnection):
yield
self._test_web_server.options['community_edition'] = False
+ @contextmanager
+ def enable_delayed_complete_mr(self, complete_at):
+ self._test_web_server.options['delayed_complete_mr'] = complete_at
+ yield
+ self._test_web_server.options['delayed_complete_mr'] = 0
+
+ @contextmanager
+ def enable_uncomplete_mr(self):
+ self._test_web_server.options['uncomplete_mr'] = True
+ orig = self.gl_client.get_mr_wait_factor
+ self.gl_client.get_mr_wait_factor = 0.1
+ yield
+ self.gl_client.get_mr_wait_factor = orig
+ self._test_web_server.options['uncomplete_mr'] = False
+
class GitlabChangeReference(git.Reference):
_common_path_default = "refs/merge-requests"
diff --git a/tests/fakegithub.py b/tests/fakegithub.py
index 725c083e2..25dcb15da 100644
--- a/tests/fakegithub.py
+++ b/tests/fakegithub.py
@@ -730,7 +730,7 @@ class FakeGithubSession(object):
'message': 'Merge not allowed because of fake reason',
}
return FakeResponse(data, 405, 'Method not allowed')
- pr.setMerged(json["commit_message"])
+ pr.setMerged(json.get("commit_message", ""))
return FakeResponse({"merged": True}, 200)
return FakeResponse(None, 404)
diff --git a/tests/fakegitlab.py b/tests/fakegitlab.py
index 294887af0..e4a3e1ac8 100644
--- a/tests/fakegitlab.py
+++ b/tests/fakegitlab.py
@@ -21,6 +21,7 @@ import re
import socketserver
import threading
import urllib.parse
+import time
from git.util import IterableList
@@ -32,12 +33,17 @@ class GitlabWebServer(object):
self.merge_requests = merge_requests
self.fake_repos = defaultdict(lambda: IterableList('name'))
# A dictionary so we can mutate it
- self.options = dict(community_edition=False)
+ self.options = dict(
+ community_edition=False,
+ delayed_complete_mr=0,
+ uncomplete_mr=False)
+ self.stats = {"get_mr": 0}
def start(self):
merge_requests = self.merge_requests
fake_repos = self.fake_repos
options = self.options
+ stats = self.stats
class Server(http.server.SimpleHTTPRequestHandler):
log = logging.getLogger("zuul.test.GitlabWebServer")
@@ -146,6 +152,7 @@ class GitlabWebServer(object):
self.wfile.write(data)
def get_mr(self, project, mr):
+ stats["get_mr"] += 1
mr = self._get_mr(project, mr)
data = {
'target_branch': mr.branch,
@@ -162,13 +169,20 @@ class GitlabWebServer(object):
'labels': mr.labels,
'merged_at': mr.merged_at.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
if mr.merged_at else mr.merged_at,
- 'diff_refs': {
+ 'merge_status': mr.merge_status,
+ }
+ if options['delayed_complete_mr'] and \
+ time.monotonic() < options['delayed_complete_mr']:
+ diff_refs = None
+ elif options['uncomplete_mr']:
+ diff_refs = None
+ else:
+ diff_refs = {
'base_sha': mr.base_sha,
'head_sha': mr.sha,
'start_sha': 'c380d3acebd181f13629a25d2e2acca46ffe1e00'
- },
- 'merge_status': mr.merge_status,
- }
+ }
+ data['diff_refs'] = diff_refs
self.send_data(data)
def get_mr_approvals(self, project, mr):
diff --git a/tests/fixtures/config/elasticsearch-driver/git/common-config/playbooks/test.yaml b/tests/fixtures/config/elasticsearch-driver/git/common-config/playbooks/test.yaml
index 75e5b6934..7902f19d0 100644
--- a/tests/fixtures/config/elasticsearch-driver/git/common-config/playbooks/test.yaml
+++ b/tests/fixtures/config/elasticsearch-driver/git/common-config/playbooks/test.yaml
@@ -2,4 +2,6 @@
tasks:
- zuul_return:
data:
+ zuul:
+ log_url: some-log-url
foo: 'bar'
diff --git a/tests/fixtures/config/in-repo-dir/git/org_project/zuul.d/project.yaml b/tests/fixtures/config/in-repo-dir/git/org_project/zuul.d/project.yaml
index 8d241f17e..201338c5c 100644
--- a/tests/fixtures/config/in-repo-dir/git/org_project/zuul.d/project.yaml
+++ b/tests/fixtures/config/in-repo-dir/git/org_project/zuul.d/project.yaml
@@ -1,8 +1,13 @@
- job:
name: project-test1
+- queue:
+ name: project-test-queue
+ allow-circular-dependencies: true
+
- project:
name: org/project
+ queue: project-test-queue
check:
jobs:
- project-test1
diff --git a/tests/fixtures/config/in-repo-dir/git/org_project2/.zuul.yaml b/tests/fixtures/config/in-repo-dir/git/org_project2/.zuul.yaml
index b86588e42..0c3d89309 100644
--- a/tests/fixtures/config/in-repo-dir/git/org_project2/.zuul.yaml
+++ b/tests/fixtures/config/in-repo-dir/git/org_project2/.zuul.yaml
@@ -1,5 +1,10 @@
+- queue:
+ name: project2-test-queue
+ allow-circular-dependencies: true
+
- project:
name: org/project2
+ queue: project2-test-queue
check:
jobs:
- project2-private-extra-file
diff --git a/tests/fixtures/config/in-repo-dir/git/org_project3/.extra-3.yaml b/tests/fixtures/config/in-repo-dir/git/org_project3/.extra-3.yaml
new file mode 100644
index 000000000..d0098aca7
--- /dev/null
+++ b/tests/fixtures/config/in-repo-dir/git/org_project3/.extra-3.yaml
@@ -0,0 +1,2 @@
+- job:
+ name: project3-private-extra-file
diff --git a/tests/fixtures/config/in-repo-dir/git/org_project3/.zuul.yaml b/tests/fixtures/config/in-repo-dir/git/org_project3/.zuul.yaml
new file mode 100644
index 000000000..4d760354f
--- /dev/null
+++ b/tests/fixtures/config/in-repo-dir/git/org_project3/.zuul.yaml
@@ -0,0 +1,11 @@
+- queue:
+ name: project3-test-queue
+ allow-circular-dependencies: true
+
+- project:
+ name: org/project3
+ queue: project3-test-queue
+ check:
+ jobs:
+ - project3-private-extra-file
+ - project3-private-extra-dir
diff --git a/tests/fixtures/config/in-repo-dir/git/org_project3/README b/tests/fixtures/config/in-repo-dir/git/org_project3/README
new file mode 100644
index 000000000..9daeafb98
--- /dev/null
+++ b/tests/fixtures/config/in-repo-dir/git/org_project3/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/in-repo-dir/git/org_project3/extra-3.d/jobs-private.yaml b/tests/fixtures/config/in-repo-dir/git/org_project3/extra-3.d/jobs-private.yaml
new file mode 100644
index 000000000..d62ad658f
--- /dev/null
+++ b/tests/fixtures/config/in-repo-dir/git/org_project3/extra-3.d/jobs-private.yaml
@@ -0,0 +1,2 @@
+- job:
+ name: project3-private-extra-dir
diff --git a/tests/fixtures/config/in-repo-dir/main.yaml b/tests/fixtures/config/in-repo-dir/main.yaml
index bf1679769..f4ea575d3 100644
--- a/tests/fixtures/config/in-repo-dir/main.yaml
+++ b/tests/fixtures/config/in-repo-dir/main.yaml
@@ -12,6 +12,10 @@
extra-config-paths:
- .extra.yaml
- extra.d/
+ - org/project3:
+ extra-config-paths:
+ - .extra-3.yaml
+ - extra-3.d/
- tenant:
name: tenant-two
diff --git a/tests/fixtures/layouts/deps-by-topic.yaml b/tests/fixtures/layouts/deps-by-topic.yaml
index 3824c5c2c..e7e8fc465 100644
--- a/tests/fixtures/layouts/deps-by-topic.yaml
+++ b/tests/fixtures/layouts/deps-by-topic.yaml
@@ -47,24 +47,27 @@
run: playbooks/run.yaml
- job:
- name: test-job
+ name: check-job
+
+- job:
+ name: gate-job
- project:
name: org/project1
queue: integrated
check:
jobs:
- - test-job
+ - check-job
gate:
jobs:
- - test-job
+ - gate-job
- project:
name: org/project2
queue: integrated
check:
jobs:
- - test-job
+ - check-job
gate:
jobs:
- - test-job
+ - gate-job
diff --git a/tests/unit/test_circular_dependencies.py b/tests/unit/test_circular_dependencies.py
index f534b2596..28ca528b5 100644
--- a/tests/unit/test_circular_dependencies.py
+++ b/tests/unit/test_circular_dependencies.py
@@ -2246,6 +2246,70 @@ class TestGerritCircularDependencies(ZuulTestCase):
self.assertEqual(B.data["status"], "MERGED")
@simple_layout('layouts/deps-by-topic.yaml')
+ def test_deps_by_topic_git_needs(self):
+ A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
+ topic='test-topic')
+ B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
+ topic='test-topic')
+ C = self.fake_gerrit.addFakeChange('org/project2', "master", "C",
+ topic='other-topic')
+ D = self.fake_gerrit.addFakeChange('org/project1', "master", "D",
+ topic='other-topic')
+
+ # Git level dependency between B and C
+ B.setDependsOn(C, 1)
+
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(len(A.patchsets[-1]["approvals"]), 1)
+ self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified")
+ self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1")
+
+ self.assertEqual(len(B.patchsets[-1]["approvals"]), 1)
+ self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified")
+ self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
+
+ self.assertEqual(len(C.patchsets[-1]["approvals"]), 1)
+ self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified")
+ self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1")
+
+ self.assertEqual(len(D.patchsets[-1]["approvals"]), 1)
+ self.assertEqual(D.patchsets[-1]["approvals"][0]["type"], "Verified")
+ self.assertEqual(D.patchsets[-1]["approvals"][0]["value"], "1")
+
+ # We're about to add approvals to changes without adding the
+ # triggering events to Zuul, so that we can be sure that it is
+ # enqueuing the changes based on dependencies, not because of
+ # triggering events. Since it will have the changes cached
+ # already (without approvals), we need to clear the cache
+ # first.
+ for connection in self.scheds.first.connections.connections.values():
+ connection.maintainCache([], max_age=0)
+
+ A.addApproval("Code-Review", 2)
+ B.addApproval("Code-Review", 2)
+ C.addApproval("Code-Review", 2)
+ D.addApproval("Code-Review", 2)
+ A.addApproval("Approved", 1)
+ C.addApproval("Approved", 1)
+ D.addApproval("Approved", 1)
+ self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
+ self.waitUntilSettled()
+
+ self.assertEqual(A.reported, 3)
+ self.assertEqual(B.reported, 3)
+ self.assertEqual(C.reported, 3)
+ self.assertEqual(D.reported, 3)
+ self.assertEqual(A.data["status"], "MERGED")
+ self.assertEqual(B.data["status"], "MERGED")
+ self.assertEqual(C.data["status"], "MERGED")
+ self.assertEqual(D.data["status"], "MERGED")
+
+ @simple_layout('layouts/deps-by-topic.yaml')
def test_deps_by_topic_new_patchset(self):
# Make sure that we correctly update the change cache on new
# patchsets.
@@ -2267,8 +2331,8 @@ class TestGerritCircularDependencies(ZuulTestCase):
self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1")
self.assertHistory([
- dict(name="test-job", result="SUCCESS", changes="2,1 1,1"),
- dict(name="test-job", result="SUCCESS", changes="1,1 2,1"),
+ dict(name="check-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="check-job", result="SUCCESS", changes="1,1 2,1"),
], ordered=False)
A.addPatchset()
@@ -2277,10 +2341,10 @@ class TestGerritCircularDependencies(ZuulTestCase):
self.assertHistory([
# Original check run
- dict(name="test-job", result="SUCCESS", changes="2,1 1,1"),
- dict(name="test-job", result="SUCCESS", changes="1,1 2,1"),
+ dict(name="check-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="check-job", result="SUCCESS", changes="1,1 2,1"),
# Second check run
- dict(name="test-job", result="SUCCESS", changes="2,1 1,2"),
+ dict(name="check-job", result="SUCCESS", changes="2,1 1,2"),
], ordered=False)
def test_deps_by_topic_multi_tenant(self):
@@ -2368,16 +2432,85 @@ class TestGerritCircularDependencies(ZuulTestCase):
self.executor_server.release()
self.waitUntilSettled()
- # A quirk: at the end of this process, the first change in
- # Gerrit has a complete run because the process of updating it
- # involves a new patchset that is enqueued. Compare to the
- # same test in GitHub.
self.assertHistory([
dict(name="project-job", result="ABORTED", changes="1,1"),
dict(name="project-job", result="ABORTED", changes="1,1 2,1"),
+ dict(name="project-job", result="SUCCESS", changes="1,2 2,1"),
dict(name="project-job", result="SUCCESS", changes="2,1 1,2"),
], ordered=False)
+ @simple_layout('layouts/deps-by-topic.yaml')
+ def test_dependency_refresh_by_topic_check(self):
+ # Test that when two changes are put into a cycle, the
+ # dependencies are refreshed and items already in pipelines
+ # are updated.
+ self.executor_server.hold_jobs_in_build = True
+
+ # This simulates the typical workflow where a developer
+ # uploads changes one at a time.
+ # The first change:
+ A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
+ topic='test-topic')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ # Now that it has been uploaded, upload the second change
+ # in the same topic.
+ B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
+ topic='test-topic')
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertHistory([
+ dict(name="check-job", result="ABORTED", changes="1,1"),
+ dict(name="check-job", result="SUCCESS", changes="2,1 1,1"),
+ dict(name="check-job", result="SUCCESS", changes="1,1 2,1"),
+ ], ordered=False)
+
+ @simple_layout('layouts/deps-by-topic.yaml')
+ def test_dependency_refresh_by_topic_gate(self):
+ # Test that when two changes are put into a cycle, the
+ # dependencies are refreshed and items already in pipelines
+ # are updated.
+ self.executor_server.hold_jobs_in_build = True
+
+ # This simulates a workflow where a developer adds a change to
+ # a cycle already in gate.
+ A = self.fake_gerrit.addFakeChange('org/project1', "master", "A",
+ topic='test-topic')
+ B = self.fake_gerrit.addFakeChange('org/project2', "master", "B",
+ topic='test-topic')
+ A.addApproval("Code-Review", 2)
+ B.addApproval("Code-Review", 2)
+ A.addApproval("Approved", 1)
+ self.fake_gerrit.addEvent(B.addApproval("Approved", 1))
+ self.waitUntilSettled()
+
+ # Add a new change to the cycle.
+ C = self.fake_gerrit.addFakeChange('org/project1', "master", "C",
+ topic='test-topic')
+ self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ # At the end of this process, the gate jobs should be aborted
+ # because the new dependency showed up.
+ self.assertEqual(A.data["status"], "NEW")
+ self.assertEqual(B.data["status"], "NEW")
+ self.assertEqual(C.data["status"], "NEW")
+ self.assertHistory([
+ dict(name="gate-job", result="ABORTED", changes="1,1 2,1"),
+ dict(name="gate-job", result="ABORTED", changes="1,1 2,1"),
+ dict(name="check-job", result="SUCCESS", changes="2,1 1,1 3,1"),
+ ], ordered=False)
+
class TestGithubCircularDependencies(ZuulTestCase):
config_file = "zuul-gerrit-github.conf"
@@ -2607,13 +2740,11 @@ class TestGithubCircularDependencies(ZuulTestCase):
self.executor_server.release()
self.waitUntilSettled()
- # A quirk: at the end of this process, the second PR in GitHub
- # has a complete run because the process of updating the first
- # change is not disruptive to the second. Compare to the same
- # test in Gerrit.
self.assertHistory([
dict(name="project-job", result="ABORTED",
changes=f"{A.number},{A.head_sha}"),
dict(name="project-job", result="SUCCESS",
changes=f"{A.number},{A.head_sha} {B.number},{B.head_sha}"),
+ dict(name="project-job", result="SUCCESS",
+ changes=f"{B.number},{B.head_sha} {A.number},{A.head_sha}"),
], ordered=False)
diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py
index b51639952..2e90d3fb4 100644
--- a/tests/unit/test_client.py
+++ b/tests/unit/test_client.py
@@ -21,16 +21,19 @@ import time
import configparser
import datetime
import dateutil.tz
+import uuid
import fixtures
import jwt
import testtools
+import sqlalchemy
from zuul.zk import ZooKeeperClient
+from zuul.zk.locks import SessionAwareLock
from zuul.cmd.client import parse_cutoff
from tests.base import BaseTestCase, ZuulTestCase
-from tests.base import FIXTURE_DIR
+from tests.base import FIXTURE_DIR, iterate_timeout
from kazoo.exceptions import NoNodeError
@@ -362,81 +365,112 @@ class TestOnlineZKOperations(ZuulTestCase):
def assertSQLState(self):
pass
- def test_delete_pipeline_check(self):
- self.executor_server.hold_jobs_in_build = True
- A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
- self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
- self.waitUntilSettled()
-
- config_file = os.path.join(self.test_root, 'zuul.conf')
- with open(config_file, 'w') as f:
- self.config.write(f)
-
- # Make sure the pipeline exists
- self.getZKTree('/zuul/tenant/tenant-one/pipeline/check/item')
- p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul-admin'),
- '-c', config_file,
- 'delete-pipeline-state',
- 'tenant-one', 'check',
- ],
- stdout=subprocess.PIPE)
- out, _ = p.communicate()
- self.log.debug(out.decode('utf8'))
- # Make sure it's deleted
- with testtools.ExpectedException(NoNodeError):
- self.getZKTree('/zuul/tenant/tenant-one/pipeline/check/item')
-
- self.executor_server.hold_jobs_in_build = False
- self.executor_server.release()
- B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
- self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ def _test_delete_pipeline(self, pipeline):
+ sched = self.scheds.first.sched
+ tenant = sched.abide.tenants['tenant-one']
+ # Force a reconfiguration due to a config change (so that the
+ # tenant trigger event queue gets a minimum timestamp set)
+ file_dict = {'zuul.yaml': ''}
+ M = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files=file_dict)
+ M.setMerged()
+ self.fake_gerrit.addEvent(M.getChangeMergedEvent())
self.waitUntilSettled()
- self.assertHistory([
- dict(name='project-merge', result='SUCCESS', changes='1,1'),
- dict(name='project-merge', result='SUCCESS', changes='2,1'),
- dict(name='project-test1', result='SUCCESS', changes='2,1'),
- dict(name='project-test2', result='SUCCESS', changes='2,1'),
- ], ordered=False)
- def test_delete_pipeline_gate(self):
self.executor_server.hold_jobs_in_build = True
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
- A.addApproval('Code-Review', 2)
- self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ if pipeline == 'check':
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ else:
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
self.waitUntilSettled()
- config_file = os.path.join(self.test_root, 'zuul.conf')
- with open(config_file, 'w') as f:
- self.config.write(f)
-
- # Make sure the pipeline exists
- self.getZKTree('/zuul/tenant/tenant-one/pipeline/gate/item')
- p = subprocess.Popen(
- [os.path.join(sys.prefix, 'bin/zuul-admin'),
- '-c', config_file,
- 'delete-pipeline-state',
- 'tenant-one', 'gate',
- ],
- stdout=subprocess.PIPE)
- out, _ = p.communicate()
- self.log.debug(out.decode('utf8'))
- # Make sure it's deleted
- with testtools.ExpectedException(NoNodeError):
- self.getZKTree('/zuul/tenant/tenant-one/pipeline/gate/item')
+ # Lock the pipeline so we don't process the event we're
+ # about to submit (it should go into the pipeline trigger event
+ # queue and stay there while we delete the pipeline state).
+ # This way we verify that events arrived before the deletion
+ # still work.
+ plock = SessionAwareLock(
+ self.zk_client.client,
+ f"/zuul/locks/pipeline/{tenant.name}/{pipeline}")
+ plock.acquire(blocking=True, timeout=None)
+ try:
+ self.log.debug('Got pipeline lock')
+ # Add a new event while our old last reconfigure time is
+ # in place.
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ if pipeline == 'check':
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ else:
+ B.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+ # Wait until it appears in the pipeline trigger event queue
+ self.log.debug('Waiting for event')
+ for x in iterate_timeout(30, 'trigger event queue has events'):
+ if sched.pipeline_trigger_events[
+ tenant.name][pipeline].hasEvents():
+ break
+ self.log.debug('Got event')
+ except Exception:
+ plock.release()
+ raise
+ # Grab the run handler lock so that we will continue to avoid
+ # further processing of the event after we release the
+ # pipeline lock (which the delete command needs to acquire).
+ sched.run_handler_lock.acquire()
+ try:
+ plock.release()
+ self.log.debug('Got run lock')
+ config_file = os.path.join(self.test_root, 'zuul.conf')
+ with open(config_file, 'w') as f:
+ self.config.write(f)
+
+ # Make sure the pipeline exists
+ self.getZKTree(
+ f'/zuul/tenant/{tenant.name}/pipeline/{pipeline}/item')
+ # Save the old layout uuid
+ tenant = sched.abide.tenants[tenant.name]
+ old_layout_uuid = tenant.layout.uuid
+ self.log.debug('Deleting pipeline state')
+ p = subprocess.Popen(
+ [os.path.join(sys.prefix, 'bin/zuul-admin'),
+ '-c', config_file,
+ 'delete-pipeline-state',
+ tenant.name, pipeline,
+ ],
+ stdout=subprocess.PIPE)
+ # Delete the pipeline state
+ out, _ = p.communicate()
+ self.log.debug(out.decode('utf8'))
+ # Make sure it's deleted
+ with testtools.ExpectedException(NoNodeError):
+ self.getZKTree(
+ f'/zuul/tenant/{tenant.name}/pipeline/{pipeline}/item')
+ finally:
+ sched.run_handler_lock.release()
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
- B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
- B.addApproval('Code-Review', 2)
- self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
self.waitUntilSettled()
self.assertHistory([
- dict(name='project-merge', result='SUCCESS', changes='1,1'),
dict(name='project-merge', result='SUCCESS', changes='2,1'),
- dict(name='project-test1', result='SUCCESS', changes='2,1'),
- dict(name='project-test2', result='SUCCESS', changes='2,1'),
+ dict(name='project-merge', result='SUCCESS', changes='3,1'),
+ dict(name='project-test1', result='SUCCESS', changes='3,1'),
+ dict(name='project-test2', result='SUCCESS', changes='3,1'),
], ordered=False)
+ tenant = sched.abide.tenants[tenant.name]
+ new_layout_uuid = tenant.layout.uuid
+ self.assertEqual(old_layout_uuid, new_layout_uuid)
+ self.assertEqual(tenant.layout.pipelines[pipeline].state.layout_uuid,
+ old_layout_uuid)
+
+ def test_delete_pipeline_check(self):
+ self._test_delete_pipeline('check')
+
+ def test_delete_pipeline_gate(self):
+ self._test_delete_pipeline('gate')
class TestDBPruneParse(BaseTestCase):
@@ -467,27 +501,107 @@ class TestDBPruneParse(BaseTestCase):
class DBPruneTestCase(ZuulTestCase):
tenant_config_file = 'config/single-tenant/main.yaml'
+ # This should be larger than the limit size in sqlconnection
+ num_buildsets = 55
+
+ def _createBuildset(self, update_time):
+ connection = self.scheds.first.sched.sql.connection
+ buildset_uuid = uuid.uuid4().hex
+ event_id = uuid.uuid4().hex
+ with connection.getSession() as db:
+ start_time = update_time - datetime.timedelta(seconds=1)
+ end_time = update_time
+ db_buildset = db.createBuildSet(
+ uuid=buildset_uuid,
+ tenant='tenant-one',
+ pipeline='check',
+ project='org/project',
+ change='1',
+ patchset='1',
+ ref='refs/changes/1',
+ oldrev='',
+ newrev='',
+ branch='master',
+ zuul_ref='Zref',
+ ref_url='http://gerrit.example.com/1',
+ event_id=event_id,
+ event_timestamp=update_time,
+ updated=update_time,
+ first_build_start_time=start_time,
+ last_build_end_time=end_time,
+ result='SUCCESS',
+ )
+ for build_num in range(2):
+ build_uuid = uuid.uuid4().hex
+ db_build = db_buildset.createBuild(
+ uuid=build_uuid,
+ job_name=f'job{build_num}',
+ start_time=start_time,
+ end_time=end_time,
+ result='SUCCESS',
+ voting=True,
+ )
+ for art_num in range(2):
+ db_build.createArtifact(
+ name=f'artifact{art_num}',
+ url='http://example.com',
+ )
+ for provides_num in range(2):
+ db_build.createProvides(
+ name=f'item{provides_num}',
+ )
+ for event_num in range(2):
+ db_build.createBuildEvent(
+ event_type=f'event{event_num}',
+ event_time=start_time,
+ )
+
+ def _query(self, db, model):
+ table = model.__table__
+ q = db.session().query(model).order_by(table.c.id.desc())
+ try:
+ return q.all()
+ except sqlalchemy.orm.exc.NoResultFound:
+ return []
+
+ def _getBuildsets(self, db):
+ return self._query(db, db.connection.buildSetModel)
+
+ def _getBuilds(self, db):
+ return self._query(db, db.connection.buildModel)
+
+ def _getProvides(self, db):
+ return self._query(db, db.connection.providesModel)
+
+ def _getArtifacts(self, db):
+ return self._query(db, db.connection.artifactModel)
+
+ def _getBuildEvents(self, db):
+ return self._query(db, db.connection.buildEventModel)
def _setup(self):
config_file = os.path.join(self.test_root, 'zuul.conf')
with open(config_file, 'w') as f:
self.config.write(f)
- A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
- self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
- self.waitUntilSettled()
-
- time.sleep(1)
-
- B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
- self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
- self.waitUntilSettled()
+ update_time = (datetime.datetime.utcnow() -
+ datetime.timedelta(minutes=self.num_buildsets))
+ for x in range(self.num_buildsets):
+ update_time = update_time + datetime.timedelta(minutes=1)
+ self._createBuildset(update_time)
connection = self.scheds.first.sched.sql.connection
- buildsets = connection.getBuildsets()
- builds = connection.getBuilds()
- self.assertEqual(len(buildsets), 2)
- self.assertEqual(len(builds), 6)
+ with connection.getSession() as db:
+ buildsets = self._getBuildsets(db)
+ builds = self._getBuilds(db)
+ artifacts = self._getArtifacts(db)
+ provides = self._getProvides(db)
+ events = self._getBuildEvents(db)
+ self.assertEqual(len(buildsets), self.num_buildsets)
+ self.assertEqual(len(builds), 2 * self.num_buildsets)
+ self.assertEqual(len(artifacts), 4 * self.num_buildsets)
+ self.assertEqual(len(provides), 4 * self.num_buildsets)
+ self.assertEqual(len(events), 4 * self.num_buildsets)
for build in builds:
self.log.debug("Build %s %s %s",
build, build.start_time, build.end_time)
@@ -503,6 +617,7 @@ class DBPruneTestCase(ZuulTestCase):
start_time = buildsets[0].first_build_start_time
self.log.debug("Cutoff %s", start_time)
+ # Use the default batch size (omit --batch-size arg)
p = subprocess.Popen(
[os.path.join(sys.prefix, 'bin/zuul-admin'),
'-c', config_file,
@@ -513,13 +628,20 @@ class DBPruneTestCase(ZuulTestCase):
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
- buildsets = connection.getBuildsets()
- builds = connection.getBuilds()
- self.assertEqual(len(buildsets), 1)
- self.assertEqual(len(builds), 3)
+ with connection.getSession() as db:
+ buildsets = self._getBuildsets(db)
+ builds = self._getBuilds(db)
+ artifacts = self._getArtifacts(db)
+ provides = self._getProvides(db)
+ events = self._getBuildEvents(db)
for build in builds:
self.log.debug("Build %s %s %s",
build, build.start_time, build.end_time)
+ self.assertEqual(len(buildsets), 1)
+ self.assertEqual(len(builds), 2)
+ self.assertEqual(len(artifacts), 4)
+ self.assertEqual(len(provides), 4)
+ self.assertEqual(len(events), 4)
def test_db_prune_older_than(self):
# Test pruning buildsets older than a relative time
@@ -535,15 +657,23 @@ class DBPruneTestCase(ZuulTestCase):
'-c', config_file,
'prune-database',
'--older-than', '0d',
+ '--batch-size', '5',
],
stdout=subprocess.PIPE)
out, _ = p.communicate()
self.log.debug(out.decode('utf8'))
- buildsets = connection.getBuildsets()
- builds = connection.getBuilds()
+ with connection.getSession() as db:
+ buildsets = self._getBuildsets(db)
+ builds = self._getBuilds(db)
+ artifacts = self._getArtifacts(db)
+ provides = self._getProvides(db)
+ events = self._getBuildEvents(db)
self.assertEqual(len(buildsets), 0)
self.assertEqual(len(builds), 0)
+ self.assertEqual(len(artifacts), 0)
+ self.assertEqual(len(provides), 0)
+ self.assertEqual(len(events), 0)
class TestDBPruneMysql(DBPruneTestCase):
diff --git a/tests/unit/test_gerrit.py b/tests/unit/test_gerrit.py
index 2a63d5ef8..4085e8b1b 100644
--- a/tests/unit/test_gerrit.py
+++ b/tests/unit/test_gerrit.py
@@ -827,6 +827,14 @@ class TestGerritFake(ZuulTestCase):
config_file = "zuul-gerrit-github.conf"
tenant_config_file = "config/circular-dependencies/main.yaml"
+ def _make_tuple(self, data):
+ ret = []
+ for c in data:
+ dep_change = c['number']
+ dep_ps = c['currentPatchSet']['number']
+ ret.append((int(dep_change), int(dep_ps)))
+ return sorted(ret)
+
def _get_tuple(self, change_number):
ret = []
data = self.fake_gerrit.get(
@@ -903,6 +911,11 @@ class TestGerritFake(ZuulTestCase):
ret = self.fake_gerrit._getSubmittedTogether(C1, None)
self.assertEqual(ret, [(4, 1)])
+ # Test also the query used by the GerritConnection:
+ ret = self.fake_gerrit._simpleQuery('status:open topic:test-topic')
+ ret = self._make_tuple(ret)
+ self.assertEqual(ret, [(3, 1), (4, 1)])
+
class TestGerritConnection(ZuulTestCase):
config_file = 'zuul-gerrit-web.conf'
@@ -958,6 +971,92 @@ class TestGerritConnection(ZuulTestCase):
self.assertEqual(A.data['status'], 'MERGED')
self.assertEqual(B.data['status'], 'MERGED')
+ def test_submit_requirements(self):
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ A.addApproval('Code-Review', 2)
+ # Set an unsatisfied submit requirement
+ A.setSubmitRequirements([
+ {
+ "name": "Code-Review",
+ "description": "Disallow self-review",
+ "status": "UNSATISFIED",
+ "is_legacy": False,
+ "submittability_expression_result": {
+ "expression": "label:Code-Review=MAX,user=non_uploader "
+ "AND -label:Code-Review=MIN",
+ "fulfilled": False,
+ "passing_atoms": [],
+ "failing_atoms": [
+ "label:Code-Review=MAX,user=non_uploader",
+ "label:Code-Review=MIN"
+ ]
+ }
+ },
+ {
+ "name": "Verified",
+ "status": "UNSATISFIED",
+ "is_legacy": True,
+ "submittability_expression_result": {
+ "expression": "label:Verified=MAX -label:Verified=MIN",
+ "fulfilled": False,
+ "passing_atoms": [],
+ "failing_atoms": [
+ "label:Verified=MAX",
+ "-label:Verified=MIN"
+ ]
+ }
+ },
+ ])
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+ self.assertHistory([])
+ self.assertEqual(A.queried, 1)
+ self.assertEqual(A.data['status'], 'NEW')
+
+ # Mark the requirement satisfied
+ A.setSubmitRequirements([
+ {
+ "name": "Code-Review",
+ "description": "Disallow self-review",
+ "status": "SATISFIED",
+ "is_legacy": False,
+ "submittability_expression_result": {
+ "expression": "label:Code-Review=MAX,user=non_uploader "
+ "AND -label:Code-Review=MIN",
+ "fulfilled": False,
+ "passing_atoms": [
+ "label:Code-Review=MAX,user=non_uploader",
+ ],
+ "failing_atoms": [
+ "label:Code-Review=MIN"
+ ]
+ }
+ },
+ {
+ "name": "Verified",
+ "status": "UNSATISFIED",
+ "is_legacy": True,
+ "submittability_expression_result": {
+ "expression": "label:Verified=MAX -label:Verified=MIN",
+ "fulfilled": False,
+ "passing_atoms": [],
+ "failing_atoms": [
+ "label:Verified=MAX",
+ "-label:Verified=MIN"
+ ]
+ }
+ },
+ ])
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+ self.assertHistory([
+ dict(name="project-merge", result="SUCCESS", changes="1,1"),
+ dict(name="project-test1", result="SUCCESS", changes="1,1"),
+ dict(name="project-test2", result="SUCCESS", changes="1,1"),
+ ], ordered=False)
+ self.assertEqual(A.queried, 3)
+ self.assertEqual(A.data['status'], 'MERGED')
+
class TestGerritUnicodeRefs(ZuulTestCase):
config_file = 'zuul-gerrit-web.conf'
diff --git a/tests/unit/test_github_driver.py b/tests/unit/test_github_driver.py
index fb46aa7d1..3060f5673 100644
--- a/tests/unit/test_github_driver.py
+++ b/tests/unit/test_github_driver.py
@@ -1430,7 +1430,9 @@ class TestGithubDriver(ZuulTestCase):
repo._set_branch_protection(
'master', contexts=['tenant-one/check', 'tenant-one/gate'])
- A = self.fake_github.openFakePullRequest('org/project', 'master', 'A')
+ pr_description = "PR description"
+ A = self.fake_github.openFakePullRequest('org/project', 'master', 'A',
+ body_text=pr_description)
self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
self.waitUntilSettled()
@@ -1448,6 +1450,9 @@ class TestGithubDriver(ZuulTestCase):
merges = [report for report in self.fake_github.github_data.reports
if report[2] == 'merge']
assert (len(merges) == 1 and merges[0][3] == 'squash')
+ # Assert that we won't duplicate the PR title in the merge
+ # message description.
+ self.assertEqual(A.merge_message, pr_description)
@simple_layout('layouts/basic-github.yaml', driver='github')
def test_invalid_event(self):
@@ -1741,6 +1746,41 @@ class TestGithubUnprotectedBranches(ZuulTestCase):
# branch
self.assertLess(old, new)
+ def test_base_branch_updated(self):
+ self.create_branch('org/project2', 'feature')
+ github = self.fake_github.getGithubClient()
+ repo = github.repo_from_project('org/project2')
+ repo._set_branch_protection('master', True)
+
+ # Make sure Zuul picked up and cached the configured branches
+ self.scheds.execute(lambda app: app.sched.reconfigure(app.config))
+ self.waitUntilSettled()
+
+ github_connection = self.scheds.first.connections.connections['github']
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ project = github_connection.source.getProject('org/project2')
+
+ # Verify that only the master branch is considered protected
+ branches = github_connection.getProjectBranches(project, tenant)
+ self.assertEqual(branches, ["master"])
+
+ A = self.fake_github.openFakePullRequest('org/project2', 'master',
+ 'A')
+ # Fake an event from a pull-request that changed the base
+ # branch from "feature" to "master". The PR is already
+ # using "master" as base, but the event still references
+ # the old "feature" branch.
+ event = A.getPullRequestOpenedEvent()
+ event[1]["pull_request"]["base"]["ref"] = "feature"
+
+ self.fake_github.emitEvent(event)
+ self.waitUntilSettled()
+
+ # Make sure we are still only considering "master" to be
+ # protected.
+ branches = github_connection.getProjectBranches(project, tenant)
+ self.assertEqual(branches, ["master"])
+
    # This test verifies that a PR is considered in case it was created
    # for a branch that had just been set to protected before a tenant
    # reconfiguration took place.
diff --git a/tests/unit/test_gitlab_driver.py b/tests/unit/test_gitlab_driver.py
index 6c4d4eeb9..e04a3b5d6 100644
--- a/tests/unit/test_gitlab_driver.py
+++ b/tests/unit/test_gitlab_driver.py
@@ -126,6 +126,27 @@ class TestGitlabDriver(ZuulTestCase):
self.assertTrue(A.approved)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
+    def test_merge_request_opened_incomplete(self):
+
+ now = time.monotonic()
+ complete_at = now + 3
+ with self.fake_gitlab.enable_delayed_complete_mr(complete_at):
+ description = "This is the\nMR description."
+ A = self.fake_gitlab.openFakeMergeRequest(
+ 'org/project', 'master', 'A', description=description)
+ self.fake_gitlab.emitEvent(
+ A.getMergeRequestOpenedEvent(), project='org/project')
+ self.waitUntilSettled()
+
+ self.assertEqual('SUCCESS',
+ self.getJobFromHistory('project-test1').result)
+
+ self.assertEqual('SUCCESS',
+ self.getJobFromHistory('project-test2').result)
+
+ self.assertTrue(self.fake_gitlab._test_web_server.stats["get_mr"] > 2)
+
+ @simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_merge_request_updated(self):
A = self.fake_gitlab.openFakeMergeRequest('org/project', 'master', 'A')
@@ -407,7 +428,7 @@ class TestGitlabDriver(ZuulTestCase):
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_pull_request_with_dyn_reconf(self):
-
+ path = os.path.join(self.upstream_root, 'org/project')
zuul_yaml = [
{'job': {
'name': 'project-test3',
@@ -424,11 +445,13 @@ class TestGitlabDriver(ZuulTestCase):
playbook = "- hosts: all\n tasks: []"
A = self.fake_gitlab.openFakeMergeRequest(
- 'org/project', 'master', 'A')
+ 'org/project', 'master', 'A',
+ base_sha=git.Repo(path).head.object.hexsha)
A.addCommit(
{'.zuul.yaml': yaml.dump(zuul_yaml),
'job.yaml': playbook}
)
+ A.addCommit({"dummy.file": ""})
self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
self.waitUntilSettled()
@@ -440,6 +463,40 @@ class TestGitlabDriver(ZuulTestCase):
self.getJobFromHistory('project-test3').result)
@simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
+ def test_pull_request_with_dyn_reconf_alt(self):
+ with self.fake_gitlab.enable_uncomplete_mr():
+ zuul_yaml = [
+ {'job': {
+ 'name': 'project-test3',
+ 'run': 'job.yaml'
+ }},
+ {'project': {
+ 'check': {
+ 'jobs': [
+ 'project-test3'
+ ]
+ }
+ }}
+ ]
+ playbook = "- hosts: all\n tasks: []"
+ A = self.fake_gitlab.openFakeMergeRequest(
+ 'org/project', 'master', 'A')
+ A.addCommit(
+ {'.zuul.yaml': yaml.dump(zuul_yaml),
+ 'job.yaml': playbook}
+ )
+ A.addCommit({"dummy.file": ""})
+ self.fake_gitlab.emitEvent(A.getMergeRequestOpenedEvent())
+ self.waitUntilSettled()
+
+ self.assertEqual('SUCCESS',
+ self.getJobFromHistory('project-test1').result)
+ self.assertEqual('SUCCESS',
+ self.getJobFromHistory('project-test2').result)
+ self.assertEqual('SUCCESS',
+ self.getJobFromHistory('project-test3').result)
+
+ @simple_layout('layouts/basic-gitlab.yaml', driver='gitlab')
def test_ref_updated_and_tenant_reconfigure(self):
self.waitUntilSettled()
diff --git a/tests/unit/test_model_upgrade.py b/tests/unit/test_model_upgrade.py
index a5a49bed4..c6cdee7ea 100644
--- a/tests/unit/test_model_upgrade.py
+++ b/tests/unit/test_model_upgrade.py
@@ -293,6 +293,33 @@ class TestModelUpgrade(ZuulTestCase):
result='SUCCESS', changes='1,1'),
], ordered=False)
+ @model_version(12)
+ def test_model_12_13(self):
+ # Initially queue items will still have the full trigger event
+ # stored in Zookeeper. The trigger event will be converted to
+ # an event info object after the model API update.
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 1)
+
+ # Upgrade our component
+ self.model_test_component_info.model_api = 13
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertHistory([
+ dict(name='project-merge', result='SUCCESS', changes='1,1'),
+ dict(name='project-test1', result='SUCCESS', changes='1,1'),
+ dict(name='project-test2', result='SUCCESS', changes='1,1'),
+ dict(name='project1-project2-integration',
+ result='SUCCESS', changes='1,1'),
+ ], ordered=False)
+
class TestGithubModelUpgrade(ZuulTestCase):
config_file = 'zuul-github-driver.conf'
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index 172ed34dc..131034f17 100644
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -7440,6 +7440,85 @@ class TestSchedulerMerges(ZuulTestCase):
result = self._test_project_merge_mode('cherry-pick')
self.assertEqual(result, expected_messages)
+ def test_project_merge_mode_cherrypick_redundant(self):
+ # A redundant commit (that is, one that has already been applied to the
+ # working tree) should be skipped
+ self.executor_server.keep_jobdir = False
+ project = 'org/project-cherry-pick'
+ files = {
+ "foo.txt": "ABC",
+ }
+ A = self.fake_gerrit.addFakeChange(project, 'master', 'A', files=files)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ self.executor_server.hold_jobs_in_build = True
+ B = self.fake_gerrit.addFakeChange(project, 'master', 'B', files=files)
+ B.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ build = self.builds[-1]
+ path = os.path.join(build.jobdir.src_root, 'review.example.com',
+ project)
+ repo = git.Repo(path)
+ repo_messages = [c.message.strip() for c in repo.iter_commits()]
+ repo_messages.reverse()
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ expected_messages = [
+ 'initial commit',
+ 'add content from fixture',
+ 'A-1',
+ ]
+ self.assertHistory([
+ dict(name='project-test1', result='SUCCESS', changes='1,1'),
+ dict(name='project-test1', result='SUCCESS', changes='2,1'),
+ ])
+ self.assertEqual(A.data['status'], 'MERGED')
+ self.assertEqual(B.data['status'], 'MERGED')
+ self.assertEqual(repo_messages, expected_messages)
+
+ def test_project_merge_mode_cherrypick_empty(self):
+ # An empty commit (that is, one that doesn't modify any files) should
+ # be preserved
+ self.executor_server.keep_jobdir = False
+ project = 'org/project-cherry-pick'
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange(project, 'master', 'A', empty=True)
+ A.addApproval('Code-Review', 2)
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.waitUntilSettled()
+
+ build = self.builds[-1]
+ path = os.path.join(build.jobdir.src_root, 'review.example.com',
+ project)
+ repo = git.Repo(path)
+ repo_messages = [c.message.strip() for c in repo.iter_commits()]
+ repo_messages.reverse()
+
+ changed_files = list(repo.commit("HEAD").diff(repo.commit("HEAD~1")))
+ self.assertEqual(changed_files, [])
+
+ self.executor_server.hold_jobs_in_build = False
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ expected_messages = [
+ 'initial commit',
+ 'add content from fixture',
+ 'A-1',
+ ]
+ self.assertHistory([
+ dict(name='project-test1', result='SUCCESS', changes='1,1'),
+ ])
+ self.assertEqual(A.data['status'], 'MERGED')
+ self.assertEqual(repo_messages, expected_messages)
+
def test_project_merge_mode_cherrypick_branch_merge(self):
"Test that branches can be merged together in cherry-pick mode"
self.create_branch('org/project-merge-branches', 'mp')
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index 004ede862..22d9518a9 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -3367,6 +3367,42 @@ class TestExtraConfigInDependent(ZuulTestCase):
changes='2,1 1,1'),
], ordered=False)
+ def test_extra_config_in_bundle_change(self):
+        # Test that jobs defined in extra-config-paths in a repo are
+        # loaded when that repo is part of a bundle with changes from
+        # different repos.
+
+ # Add an empty zuul.yaml here so we are triggering dynamic layout load
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
+ files={'zuul.yaml': ''})
+ B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B',
+ files={'zuul.yaml': ''})
+ C = self.fake_gerrit.addFakeChange('org/project3', 'master', 'C',
+ files={'zuul.yaml': ''})
+        # A and B form a bundle, and A depends on C
+ A.data['commitMessage'] = '%s\n\nDepends-On: %s\nDepends-On: %s\n' % (
+ A.subject, B.data['url'], C.data['url'])
+ B.data['commitMessage'] = '%s\n\nDepends-On: %s\n' % (
+ B.subject, A.data['url'])
+
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+ self.waitUntilSettled()
+
+ # Jobs in both changes should be success
+ self.assertHistory([
+ dict(name='project2-private-extra-file', result='SUCCESS',
+ changes='3,1 1,1 2,1'),
+ dict(name='project2-private-extra-dir', result='SUCCESS',
+ changes='3,1 1,1 2,1'),
+ dict(name='project-test1', result='SUCCESS',
+ changes='3,1 2,1 1,1'),
+ dict(name='project3-private-extra-file', result='SUCCESS',
+ changes='3,1'),
+ dict(name='project3-private-extra-dir', result='SUCCESS',
+ changes='3,1'),
+ ], ordered=False)
+
class TestGlobalRepoState(AnsibleZuulTestCase):
config_file = 'zuul-connections-gerrit-and-github.conf'
diff --git a/web/src/containers/FilterToolbar.jsx b/web/src/containers/FilterToolbar.jsx
index 328d60d85..bcf74c062 100644
--- a/web/src/containers/FilterToolbar.jsx
+++ b/web/src/containers/FilterToolbar.jsx
@@ -13,6 +13,7 @@
// under the License.
import React, { useState } from 'react'
+import { useDispatch } from 'react-redux'
import PropTypes from 'prop-types'
import {
Button,
@@ -32,12 +33,14 @@ import {
} from '@patternfly/react-core'
import { FilterIcon, SearchIcon } from '@patternfly/react-icons'
+import { addNotification } from '../actions/notifications'
import { FilterSelect } from './filters/Select'
import { FilterTernarySelect } from './filters/TernarySelect'
import { FilterCheckbox } from './filters/Checkbox'
function FilterToolbar(props) {
+ const dispatch = useDispatch()
const [isCategoryDropdownOpen, setIsCategoryDropdownOpen] = useState(false)
const [currentCategory, setCurrentCategory] = useState(
props.filterCategories[0].title
@@ -58,15 +61,22 @@ function FilterToolbar(props) {
}
function handleInputSend(event, category) {
- const { onFilterChange, filters } = props
+ const { onFilterChange, filters, filterInputValidation } = props
// In case the event comes from a key press, only accept "Enter"
if (event.key && event.key !== 'Enter') {
return
}
- // Ignore empty values
- if (!inputValue) {
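+    // filterInputValidation is expected to return an object of the
+    // shape { success: boolean, message?: string }; each page supplies
+    // its own rules (see e.g. Builds.jsx).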
+ const validationResult = filterInputValidation(category.key, inputValue)
+ if (!validationResult.success) {
+ dispatch(addNotification(
+ {
+ text: validationResult.message,
+ type: 'error',
+ status: '',
+ url: '',
+ }))
return
}
@@ -250,6 +260,7 @@ FilterToolbar.propTypes = {
onFilterChange: PropTypes.func.isRequired,
filters: PropTypes.object.isRequired,
filterCategories: PropTypes.array.isRequired,
+ filterInputValidation: PropTypes.func.isRequired,
}
function getChipsFromFilters(filters, category) {
diff --git a/web/src/pages/Builds.jsx b/web/src/pages/Builds.jsx
index b89bcd678..f1c449ec0 100644
--- a/web/src/pages/Builds.jsx
+++ b/web/src/pages/Builds.jsx
@@ -195,6 +195,28 @@ class BuildsPage extends React.Component {
this.updateData(filters)
}
}
+
+ filterInputValidation = (filterKey, filterValue) => {
+    // The input value must never be empty
+ if (!filterValue) {
+ return {
+ success: false,
+ message: 'Input should not be empty'
+ }
+ }
+
+ // For change filter, it must be an integer
+ if (filterKey === 'change' && isNaN(filterValue)) {
+ return {
+ success: false,
+ message: 'Change must be an integer (do not include revision)'
+ }
+ }
+
+ return {
+ success: true
+ }
+ }
handleFilterChange = (newFilters) => {
const { location, history } = this.props
@@ -261,6 +283,7 @@ class BuildsPage extends React.Component {
filterCategories={this.filterCategories}
onFilterChange={this.handleFilterChange}
filters={filters}
+ filterInputValidation={this.filterInputValidation}
/>
<Pagination
toggleTemplate={({ firstIndex, lastIndex, itemCount }) => (
diff --git a/web/src/pages/Buildsets.jsx b/web/src/pages/Buildsets.jsx
index 98d86d640..938309034 100644
--- a/web/src/pages/Buildsets.jsx
+++ b/web/src/pages/Buildsets.jsx
@@ -148,6 +148,28 @@ class BuildsetsPage extends React.Component {
}
}
+ filterInputValidation = (filterKey, filterValue) => {
+    // The input value must never be empty
+ if (!filterValue) {
+ return {
+ success: false,
+ message: 'Input should not be empty'
+ }
+ }
+
+ // For change filter, it must be an integer
+ if (filterKey === 'change' && isNaN(filterValue)) {
+ return {
+ success: false,
+ message: 'Change must be an integer (do not include revision)'
+ }
+ }
+
+ return {
+ success: true
+ }
+ }
+
handleFilterChange = (newFilters) => {
const { location, history } = this.props
const { filters, itemCount } = this.state
@@ -213,6 +235,7 @@ class BuildsetsPage extends React.Component {
filterCategories={this.filterCategories}
onFilterChange={this.handleFilterChange}
filters={filters}
+ filterInputValidation={this.filterInputValidation}
/>
<Pagination
toggleTemplate={({ firstIndex, lastIndex, itemCount }) => (
diff --git a/web/src/pages/Labels.jsx b/web/src/pages/Labels.jsx
index 10decaa73..1c6e50db5 100644
--- a/web/src/pages/Labels.jsx
+++ b/web/src/pages/Labels.jsx
@@ -15,8 +15,15 @@
import * as React from 'react'
import PropTypes from 'prop-types'
import { connect } from 'react-redux'
-import { Table } from 'patternfly-react'
import { PageSection, PageSectionVariants } from '@patternfly/react-core'
+import {
+ Table,
+ TableVariant,
+ TableHeader,
+ TableBody,
+} from '@patternfly/react-table'
+import { TagIcon } from '@patternfly/react-icons'
+import { IconProperty } from '../Misc'
import { fetchLabelsIfNeeded } from '../actions/labels'
import { Fetchable, Fetching } from '../containers/Fetching'
@@ -54,18 +61,22 @@ class LabelsPage extends React.Component {
return <Fetching />
}
- const headerFormat = value => <Table.Heading>{value}</Table.Heading>
- const cellFormat = value => <Table.Cell>{value}</Table.Cell>
- const columns = []
- const myColumns = ['name']
- myColumns.forEach(column => {
- let formatter = cellFormat
- let prop = column
- columns.push({
- header: {label: column, formatters: [headerFormat]},
- property: prop,
- cell: {formatters: [formatter]}
- })
+ const columns = [
+ {
+ title: (
+ <IconProperty icon={<TagIcon />} value="Name" />
+ ),
+ dataLabel: 'name'
+ }
+ ]
+ let rows = []
+ labels.forEach((label) => {
+ let r = {
+ cells: [
+ {title: label.name, props: {column: 'Name'}}
+ ],
+ }
+ rows.push(r)
})
return (
<PageSection variant={PageSectionVariants.light}>
@@ -75,18 +86,16 @@ class LabelsPage extends React.Component {
fetchCallback={this.updateData}
/>
</PageSection>
- <Table.PfProvider
- striped
- bordered
- hover
- columns={columns}
+ <Table
+ aria-label="Labels Table"
+ variant={TableVariant.compact}
+ cells={columns}
+ rows={rows}
+ className="zuul-table"
>
- <Table.Header/>
- <Table.Body
- rows={labels}
- rowKey="name"
- />
- </Table.PfProvider>
+ <TableHeader />
+ <TableBody />
+ </Table>
</PageSection>
)
}
diff --git a/web/src/pages/Nodes.jsx b/web/src/pages/Nodes.jsx
index a7081ac59..b13878087 100644
--- a/web/src/pages/Nodes.jsx
+++ b/web/src/pages/Nodes.jsx
@@ -15,9 +15,29 @@
import * as React from 'react'
import PropTypes from 'prop-types'
import { connect } from 'react-redux'
-import { Table } from 'patternfly-react'
+import {
+ Table,
+ TableVariant,
+ TableHeader,
+ TableBody,
+} from '@patternfly/react-table'
import * as moment from 'moment'
-import { PageSection, PageSectionVariants } from '@patternfly/react-core'
+import {
+ PageSection,
+ PageSectionVariants,
+ ClipboardCopy,
+} from '@patternfly/react-core'
+import {
+ BuildIcon,
+ ClusterIcon,
+ ConnectedIcon,
+ OutlinedCalendarAltIcon,
+ TagIcon,
+ RunningIcon,
+ PencilAltIcon,
+ ZoneIcon,
+} from '@patternfly/react-icons'
+import { IconProperty } from '../Misc'
import { fetchNodesIfNeeded } from '../actions/nodes'
import { Fetchable } from '../containers/Fetching'
@@ -51,43 +71,69 @@ class NodesPage extends React.Component {
const { remoteData } = this.props
const nodes = remoteData.nodes
- const headerFormat = value => <Table.Heading>{value}</Table.Heading>
- const cellFormat = value => <Table.Cell>{value}</Table.Cell>
- const cellLabelsFormat = value => <Table.Cell>{value.join(',')}</Table.Cell>
- const cellPreFormat = value => (
- <Table.Cell style={{fontFamily: 'Menlo,Monaco,Consolas,monospace'}}>
- {value}
- </Table.Cell>)
- const cellAgeFormat = value => (
- <Table.Cell style={{fontFamily: 'Menlo,Monaco,Consolas,monospace'}}>
- {moment.unix(value).fromNow()}
- </Table.Cell>)
-
- const columns = []
- const myColumns = [
- 'id', 'labels', 'connection', 'server', 'provider', 'state',
- 'age', 'comment'
- ]
- myColumns.forEach(column => {
- let formatter = cellFormat
- let prop = column
- if (column === 'labels') {
- prop = 'type'
- formatter = cellLabelsFormat
- } else if (column === 'connection') {
- prop = 'connection_type'
- } else if (column === 'server') {
- prop = 'external_id'
- formatter = cellPreFormat
- } else if (column === 'age') {
- prop = 'state_time'
- formatter = cellAgeFormat
+ const columns = [
+ {
+ title: (
+ <IconProperty icon={<BuildIcon />} value="ID" />
+ ),
+ dataLabel: 'id',
+ },
+ {
+ title: (
+ <IconProperty icon={<TagIcon />} value="Labels" />
+ ),
+ dataLabel: 'labels',
+ },
+ {
+ title: (
+ <IconProperty icon={<ConnectedIcon />} value="Connection" />
+ ),
+ dataLabel: 'connection',
+ },
+ {
+ title: (
+ <IconProperty icon={<ClusterIcon />} value="Server" />
+ ),
+ dataLabel: 'server',
+ },
+ {
+ title: (
+ <IconProperty icon={<ZoneIcon />} value="Provider" />
+ ),
+ dataLabel: 'provider',
+ },
+ {
+ title: (
+ <IconProperty icon={<RunningIcon />} value="State" />
+ ),
+ dataLabel: 'state',
+ },
+ {
+ title: (
+ <IconProperty icon={<OutlinedCalendarAltIcon />} value="Age" />
+ ),
+ dataLabel: 'age',
+ },
+ {
+ title: (
+ <IconProperty icon={<PencilAltIcon />} value="Comment" />
+ ),
+ dataLabel: 'comment',
}
- columns.push({
- header: {label: column, formatters: [headerFormat]},
- property: prop,
- cell: {formatters: [formatter]}
- })
+ ]
+ let rows = []
+ nodes.forEach((node) => {
+ let r = [
+ {title: node.id, props: {column: 'ID'}},
+      {title: node.type.join(','), props: {column: 'Labels'}},
+ {title: node.connection_type, props: {column: 'Connection'}},
+ {title: <ClipboardCopy hoverTip="Copy" clickTip="Copied" variant="inline-compact">{node.external_id}</ClipboardCopy>, props: {column: 'Server'}},
+ {title: node.provider, props: {column: 'Provider'}},
+ {title: node.state, props: {column: 'State'}},
+ {title: moment.unix(node.state_time).fromNow(), props: {column: 'Age'}},
+ {title: node.comment, props: {column: 'Comment'}},
+ ]
+ rows.push({cells: r})
})
return (
<PageSection variant={PageSectionVariants.light}>
@@ -97,18 +143,17 @@ class NodesPage extends React.Component {
fetchCallback={this.updateData}
/>
</PageSection>
- <Table.PfProvider
- striped
- bordered
- hover
- columns={columns}
+
+ <Table
+ aria-label="Nodes Table"
+ variant={TableVariant.compact}
+ cells={columns}
+ rows={rows}
+ className="zuul-table"
>
- <Table.Header/>
- <Table.Body
- rows={nodes}
- rowKey="id"
- />
- </Table.PfProvider>
+ <TableHeader />
+ <TableBody />
+ </Table>
</PageSection>
)
}
diff --git a/web/src/pages/Projects.jsx b/web/src/pages/Projects.jsx
index 13ee81bd8..598133384 100644
--- a/web/src/pages/Projects.jsx
+++ b/web/src/pages/Projects.jsx
@@ -16,8 +16,18 @@ import * as React from 'react'
import PropTypes from 'prop-types'
import { connect } from 'react-redux'
import { Link } from 'react-router-dom'
-import { Table } from 'patternfly-react'
import { PageSection, PageSectionVariants } from '@patternfly/react-core'
+import {
+ Table,
+ TableVariant,
+ TableHeader,
+ TableBody,
+} from '@patternfly/react-table'
+import {
+ CubeIcon,
+ ConnectedIcon,
+} from '@patternfly/react-icons'
+import { IconProperty } from '../Misc'
import { fetchProjectsIfNeeded } from '../actions/projects'
import { Fetchable, Fetching } from '../containers/Fetching'
@@ -59,42 +69,35 @@ class ProjectsPage extends React.Component {
return <Fetching />
}
- const headerFormat = value => <Table.Heading>{value}</Table.Heading>
- const cellFormat = (value) => (
- <Table.Cell>{value}</Table.Cell>)
- const cellProjectFormat = (value, row) => (
- <Table.Cell>
- <Link to={this.props.tenant.linkPrefix + '/project/' + row.rowData.canonical_name}>
- {value}
- </Link>
- </Table.Cell>)
- const cellBuildFormat = (value) => (
- <Table.Cell>
- <Link to={this.props.tenant.linkPrefix + '/builds?project=' + value}>
- builds
- </Link>
- </Table.Cell>)
- const columns = []
- const myColumns = ['name', 'connection', 'type', 'last builds']
- myColumns.forEach(column => {
- let formatter = cellFormat
- let prop = column
- if (column === 'name') {
- formatter = cellProjectFormat
+ const columns = [
+ {
+ title: <IconProperty icon={<CubeIcon />} value="Name" />,
+ dataLabel: 'name',
+ },
+ {
+ title: <IconProperty icon={<ConnectedIcon />} value="Connection" />,
+ dataLabel: 'connection',
+ },
+ {
+ title: 'Type',
+ dataLabel: 'type',
+ },
+ {
+ title: 'Last builds',
+ dataLabel: 'last-builds',
}
- if (column === 'connection') {
- prop = 'connection_name'
+ ]
+ let rows = []
+ projects.forEach((project) => {
+ let r = {
+ cells: [
+ {title: <Link to={this.props.tenant.linkPrefix + '/project/' + project.canonical_name}>{project.name}</Link>, props: {column: 'Name'}},
+ {title: project.connection_name, props: {column: 'Connection'}},
+ {title: project.type, props: {column: 'Type'}},
+ {title: <Link to={this.props.tenant.linkPrefix + '/builds?project=' + project.name}>Builds</Link>, props: {column: 'Last builds'}},
+ ]
}
- if (column === 'last builds') {
- prop = 'name'
- formatter = cellBuildFormat
- }
- columns.push({
- header: {label: column,
- formatters: [headerFormat]},
- property: prop,
- cell: {formatters: [formatter]}
- })
+ rows.push(r)
})
return (
<PageSection variant={PageSectionVariants.light}>
@@ -104,18 +107,16 @@ class ProjectsPage extends React.Component {
fetchCallback={this.updateData}
/>
</PageSection>
- <Table.PfProvider
- striped
- bordered
- hover
- columns={columns}
+ <Table
+ aria-label="Projects Table"
+ variant={TableVariant.compact}
+ cells={columns}
+ rows={rows}
+ className="zuul-table"
>
- <Table.Header/>
- <Table.Body
- rows={projects}
- rowKey="name"
- />
- </Table.PfProvider>
+ <TableHeader />
+ <TableBody />
+ </Table>
</PageSection>
)
}
diff --git a/zuul/ansible/base/callback/zuul_stream.py b/zuul/ansible/base/callback/zuul_stream.py
index b5c14691b..3f886c797 100644
--- a/zuul/ansible/base/callback/zuul_stream.py
+++ b/zuul/ansible/base/callback/zuul_stream.py
@@ -44,6 +44,7 @@ import time
from ansible.plugins.callback import default
from ansible.module_utils._text import to_text
+from ansible.module_utils.parsing.convert_bool import boolean
from zuul.ansible import paths
from zuul.ansible import logconfig
@@ -333,6 +334,10 @@ class CallbackModule(default.CallbackModule):
if (ip in ('localhost', '127.0.0.1')):
# Don't try to stream from localhost
continue
+ if boolean(play_vars[host].get(
+ 'zuul_console_disabled', False)):
+ # The user has told us not to even try
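+                # (e.g. by setting "zuul_console_disabled: true" as a
+                # host or group variable)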
+ continue
if play_vars[host].get('ansible_connection') in ('winrm',):
# The winrm connections don't support streaming for now
continue
diff --git a/zuul/cmd/client.py b/zuul/cmd/client.py
index 031b10a1e..6fa20c7c4 100755
--- a/zuul/cmd/client.py
+++ b/zuul/cmd/client.py
@@ -30,16 +30,14 @@ import time
import textwrap
import requests
import urllib.parse
-from uuid import uuid4
import zuul.cmd
from zuul.lib.config import get_default
-from zuul.model import SystemAttributes, PipelineState
+from zuul.model import SystemAttributes, PipelineState, PipelineChangeList
from zuul.zk import ZooKeeperClient
from zuul.lib.keystorage import KeyStorage
-from zuul.zk.locks import tenant_write_lock
+from zuul.zk.locks import tenant_read_lock, pipeline_lock
from zuul.zk.zkobject import ZKContext
-from zuul.zk.layout import LayoutState, LayoutStateStore
from zuul.zk.components import COMPONENT_REGISTRY
@@ -542,6 +540,10 @@ class Client(zuul.cmd.ZuulApp):
cmd_prune_database.add_argument(
'--older-than',
help='relative time (e.g., "24h" or "180d")')
+ cmd_prune_database.add_argument(
+ '--batch-size',
+ default=10000,
+ help='transaction batch size')
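+        # Example (hypothetical values), run via the admin client:
+        #   zuul-admin prune-database --older-than 180d --batch-size 5000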
cmd_prune_database.set_defaults(func=self.prune_database)
return parser
@@ -1029,28 +1031,18 @@ class Client(zuul.cmd.ZuulApp):
safe_tenant = urllib.parse.quote_plus(args.tenant)
safe_pipeline = urllib.parse.quote_plus(args.pipeline)
COMPONENT_REGISTRY.create(zk_client)
- with tenant_write_lock(zk_client, args.tenant) as lock:
+        self.log.info('Acquiring tenant read lock')
+ with tenant_read_lock(zk_client, args.tenant):
path = f'/zuul/tenant/{safe_tenant}/pipeline/{safe_pipeline}'
- layout_uuid = None
- zk_client.client.delete(
- f'/zuul/tenant/{safe_tenant}/pipeline/{safe_pipeline}',
- recursive=True)
- with ZKContext(zk_client, lock, None, self.log) as context:
- ps = PipelineState.new(context, _path=path,
- layout_uuid=layout_uuid)
- # Force everyone to make a new layout for this tenant in
- # order to rebuild the shared change queues.
- layout_state = LayoutState(
- tenant_name=args.tenant,
- hostname='admin command',
- last_reconfigured=int(time.time()),
- last_reconfigure_event_ltime=-1,
- uuid=uuid4().hex,
- branch_cache_min_ltimes={},
- ltime=ps._zstat.last_modified_transaction_id,
- )
- tenant_layout_state = LayoutStateStore(zk_client, lambda: None)
- tenant_layout_state[args.tenant] = layout_state
+            self.log.info('Acquiring pipeline lock')
+ with pipeline_lock(
+ zk_client, args.tenant, args.pipeline
+ ) as plock:
+                self.log.info('Locks acquired')
+ zk_client.client.delete(path, recursive=True)
+ with ZKContext(zk_client, plock, None, self.log) as context:
+ PipelineState.new(context, _path=path, layout_uuid=None)
+ PipelineChangeList.new(context)
sys.exit(0)
@@ -1061,7 +1053,7 @@ class Client(zuul.cmd.ZuulApp):
cutoff = parse_cutoff(now, args.before, args.older_than)
self.configure_connections(source_only=False, require_sql=True)
connection = self.connections.getSqlConnection()
- connection.deleteBuildsets(cutoff)
+ connection.deleteBuildsets(cutoff, args.batch_size)
sys.exit(0)
diff --git a/zuul/configloader.py b/zuul/configloader.py
index fe22fe0f8..4f472cb4e 100644
--- a/zuul/configloader.py
+++ b/zuul/configloader.py
@@ -2151,21 +2151,26 @@ class TenantParser(object):
for future in futures:
future.result()
- try:
- self._processCatJobs(abide, tenant, loading_errors, jobs,
- min_ltimes)
- except Exception:
- self.log.exception("Error processing cat jobs, canceling")
- for job in jobs:
+ for i, job in enumerate(jobs, start=1):
+ try:
try:
- self.log.debug("Canceling cat job %s", job)
+ self._processCatJob(abide, tenant, loading_errors, job,
+ min_ltimes)
+ except TimeoutError:
self.merger.cancel(job)
- except Exception:
- self.log.exception("Unable to cancel job %s", job)
- if not ignore_cat_exception:
- raise
- if not ignore_cat_exception:
- raise
+ raise
+ except Exception:
+ self.log.exception("Error processing cat job")
+ if not ignore_cat_exception:
+ # Cancel remaining jobs
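+                    # (enumerate() above started at 1, so jobs[i:] are
+                    # exactly the jobs we have not yet waited on)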
+ for cancel_job in jobs[i:]:
+ self.log.debug("Canceling cat job %s", cancel_job)
+ try:
+ self.merger.cancel(cancel_job)
+ except Exception:
+ self.log.exception(
+ "Unable to cancel job %s", cancel_job)
+ raise
def _cacheTenantYAMLBranch(self, abide, tenant, loading_errors, min_ltimes,
tpc, project, branch, jobs):
@@ -2234,49 +2239,48 @@ class TenantParser(object):
job.source_context = source_context
jobs.append(job)
- def _processCatJobs(self, abide, tenant, loading_errors, jobs, min_ltimes):
+ def _processCatJob(self, abide, tenant, loading_errors, job, min_ltimes):
# Called at the end of _cacheTenantYAML after all cat jobs
# have been submitted
- for job in jobs:
- self.log.debug("Waiting for cat job %s" % (job,))
- res = job.wait(self.merger.git_timeout)
- if not res:
- # We timed out
- raise Exception("Cat job %s timed out; consider setting "
- "merger.git_timeout in zuul.conf" % (job,))
- if not job.updated:
- raise Exception("Cat job %s failed" % (job,))
- self.log.debug("Cat job %s got files %s" %
- (job, job.files.keys()))
-
- self._updateUnparsedBranchCache(abide, tenant, job.source_context,
- job.files, loading_errors,
- job.ltime, min_ltimes)
-
- # Save all config files in Zookeeper (not just for the current tpc)
- files_cache = self.unparsed_config_cache.getFilesCache(
- job.source_context.project_canonical_name,
- job.source_context.branch)
- with self.unparsed_config_cache.writeLock(
- job.source_context.project_canonical_name):
- # Prevent files cache ltime from going backward
- if files_cache.ltime >= job.ltime:
- self.log.info(
- "Discarding job %s result since the files cache was "
- "updated in the meantime", job)
- continue
- # Since the cat job returns all required config files
- # for ALL tenants the project is a part of, we can
- # clear the whole cache and then populate it with the
- # updated content.
- files_cache.clear()
- for fn, content in job.files.items():
- # Cache file in Zookeeper
- if content is not None:
- files_cache[fn] = content
- files_cache.setValidFor(job.extra_config_files,
- job.extra_config_dirs,
- job.ltime)
+ self.log.debug("Waiting for cat job %s" % (job,))
+ res = job.wait(self.merger.git_timeout)
+ if not res:
+ # We timed out
+ raise TimeoutError(f"Cat job {job} timed out; consider setting "
+ "merger.git_timeout in zuul.conf")
+ if not job.updated:
+ raise Exception("Cat job %s failed" % (job,))
+ self.log.debug("Cat job %s got files %s" %
+ (job, job.files.keys()))
+
+ self._updateUnparsedBranchCache(abide, tenant, job.source_context,
+ job.files, loading_errors,
+ job.ltime, min_ltimes)
+
+ # Save all config files in Zookeeper (not just for the current tpc)
+ files_cache = self.unparsed_config_cache.getFilesCache(
+ job.source_context.project_canonical_name,
+ job.source_context.branch)
+ with self.unparsed_config_cache.writeLock(
+ job.source_context.project_canonical_name):
+ # Prevent files cache ltime from going backward
+ if files_cache.ltime >= job.ltime:
+ self.log.info(
+ "Discarding job %s result since the files cache was "
+ "updated in the meantime", job)
+ return
+ # Since the cat job returns all required config files
+ # for ALL tenants the project is a part of, we can
+ # clear the whole cache and then populate it with the
+ # updated content.
+ files_cache.clear()
+ for fn, content in job.files.items():
+ # Cache file in Zookeeper
+ if content is not None:
+ files_cache[fn] = content
+ files_cache.setValidFor(job.extra_config_files,
+ job.extra_config_dirs,
+ job.ltime)
def _updateUnparsedBranchCache(self, abide, tenant, source_context, files,
loading_errors, ltime, min_ltimes):
diff --git a/zuul/driver/elasticsearch/reporter.py b/zuul/driver/elasticsearch/reporter.py
index 7802cb609..e5e90e052 100644
--- a/zuul/driver/elasticsearch/reporter.py
+++ b/zuul/driver/elasticsearch/reporter.py
@@ -103,7 +103,9 @@ class ElasticsearchReporter(BaseReporter):
build_doc['job_vars'] = job.variables
if self.index_returned_vars:
- build_doc['job_returned_vars'] = build.result_data
+ rdata = build.result_data.copy()
+ rdata.pop('zuul', None)
+ build_doc['job_returned_vars'] = rdata
docs.append(build_doc)
diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py
index 276365e1d..990b7b235 100644
--- a/zuul/driver/gerrit/gerritconnection.py
+++ b/zuul/driver/gerrit/gerritconnection.py
@@ -161,6 +161,8 @@ class GerritEventConnector(threading.Thread):
IGNORED_EVENTS = (
'cache-eviction', # evict-cache plugin
+ 'fetch-ref-replicated',
+ 'fetch-ref-replication-scheduled',
'ref-replicated',
'ref-replication-scheduled',
'ref-replication-done'
@@ -1180,9 +1182,34 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
return True
if change.wip:
return False
- if change.missing_labels <= set(allow_needs):
- return True
- return False
+ if change.missing_labels > set(allow_needs):
+ self.log.debug("Unable to merge due to "
+ "missing labels: %s", change.missing_labels)
+ return False
+ for sr in change.submit_requirements:
+ if sr.get('status') == 'UNSATISFIED':
+                # Requirements in any other status are not blocking and
+                # can be skipped.
+
+ # We're going to look at each unsatisfied submit
+ # requirement, and if one of the involved labels is an
+ # "allow_needs" label, we will assume that Zuul may be
+ # able to take an action which can cause the
+ # requirement to be satisfied, and we will ignore it.
+ # Otherwise, it is likely a requirement that Zuul can
+ # not alter in which case the requirement should stand
+ # and block merging.
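+            # For illustration (hypothetical data): with allow_needs =
+            # {'Approved'}, an unsatisfied requirement whose expression
+            # contains "label:Approved" is ignored, while one built only
+            # from "label:Verified" keeps blocking the merge.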
+ result = sr.get("submittability_expression_result", {})
+ expression = result.get("expression", '')
+ expr_contains_allow = False
+ for allow in allow_needs:
+ if f'label:{allow}' in expression:
+ expr_contains_allow = True
+ break
+ if not expr_contains_allow:
+ self.log.debug("Unable to merge due to "
+ "submit requirement: %s", sr)
+ return False
+ return True
def getProjectOpenChanges(self, project: Project) -> List[GerritChange]:
# This is a best-effort function in case Gerrit is unable to return
@@ -1441,13 +1468,22 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
return data
def queryChangeHTTP(self, number, event=None):
- data = self.get('changes/%s?o=DETAILED_ACCOUNTS&o=CURRENT_REVISION&'
- 'o=CURRENT_COMMIT&o=CURRENT_FILES&o=LABELS&'
- 'o=DETAILED_LABELS' % (number,))
+ query = ('changes/%s?o=DETAILED_ACCOUNTS&o=CURRENT_REVISION&'
+ 'o=CURRENT_COMMIT&o=CURRENT_FILES&o=LABELS&'
+ 'o=DETAILED_LABELS' % (number,))
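+        # Gerrit 3.5 introduced submit requirements; older servers may
+        # reject unknown options, so SUBMIT_REQUIREMENTS is only
+        # requested when the server is new enough.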
+ if self.version >= (3, 5, 0):
+ query += '&o=SUBMIT_REQUIREMENTS'
+ data = self.get(query)
related = self.get('changes/%s/revisions/%s/related' % (
number, data['current_revision']))
- files = self.get('changes/%s/revisions/%s/files?parent=1' % (
- number, data['current_revision']))
+
+ files_query = 'changes/%s/revisions/%s/files' % (
+ number, data['current_revision'])
+
+ if data['revisions'][data['current_revision']]['commit']['parents']:
+ files_query += '?parent=1'
+
+ files = self.get(files_query)
return data, related, files
def queryChange(self, number, event=None):
diff --git a/zuul/driver/gerrit/gerritmodel.py b/zuul/driver/gerrit/gerritmodel.py
index 0ac3e7f9d..4ac291f2b 100644
--- a/zuul/driver/gerrit/gerritmodel.py
+++ b/zuul/driver/gerrit/gerritmodel.py
@@ -34,6 +34,7 @@ class GerritChange(Change):
self.wip = None
self.approvals = []
self.missing_labels = set()
+ self.submit_requirements = []
self.commit = None
self.zuul_query_ltime = None
@@ -52,6 +53,7 @@ class GerritChange(Change):
"wip": self.wip,
"approvals": self.approvals,
"missing_labels": list(self.missing_labels),
+ "submit_requirements": self.submit_requirements,
"commit": self.commit,
"zuul_query_ltime": self.zuul_query_ltime,
})
@@ -64,6 +66,7 @@ class GerritChange(Change):
self.wip = data["wip"]
self.approvals = data["approvals"]
self.missing_labels = set(data["missing_labels"])
+ self.submit_requirements = data.get("submit_requirements", [])
self.commit = data.get("commit")
self.zuul_query_ltime = data.get("zuul_query_ltime")
@@ -189,6 +192,7 @@ class GerritChange(Change):
if 'approved' in label_data:
continue
self.missing_labels.add(label_name)
+ self.submit_requirements = data.get('submit_requirements', [])
self.open = data['status'] == 'NEW'
self.status = data['status']
self.wip = data.get('work_in_progress', False)
diff --git a/zuul/driver/gerrit/gerritsource.py b/zuul/driver/gerrit/gerritsource.py
index f42e93254..b0bd3c448 100644
--- a/zuul/driver/gerrit/gerritsource.py
+++ b/zuul/driver/gerrit/gerritsource.py
@@ -164,11 +164,14 @@ class GerritSource(BaseSource):
change = self.connection._getChange(change_key)
changes[change_key] = change
- for change in changes.values():
- for git_key in change.git_needs_changes:
- if git_key in changes:
+        # Convert to a list here because the recursive call can mutate
+        # the dict.
+ for change in list(changes.values()):
+ for git_change_ref in change.git_needs_changes:
+ change_key = ChangeKey.fromReference(git_change_ref)
+ if change_key in changes:
continue
- git_change = self.getChange(git_key)
+ git_change = self.getChange(change_key)
if not git_change.topic or git_change.topic == topic:
continue
self.getChangesByTopic(git_change.topic, changes)
diff --git a/zuul/driver/github/githubconnection.py b/zuul/driver/github/githubconnection.py
index 182c83bae..a1353cb4d 100644
--- a/zuul/driver/github/githubconnection.py
+++ b/zuul/driver/github/githubconnection.py
@@ -81,6 +81,10 @@ ANNOTATION_LEVELS = {
"warning": "warning",
"error": "failure",
}
+# The maximum size for the 'message' field is 64 KB. Since the Github
+# docs don't say whether the unit is KiB or KB, we use KB to be on the
+# safe side.
+ANNOTATION_MAX_MESSAGE_SIZE = 64 * 1000
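+# The truncation below slices the encoded UTF-8 bytes and decodes with
+# errors="ignore", so a multi-byte character cut at the boundary is
+# dropped instead of raising UnicodeDecodeError.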
EventTuple = collections.namedtuple(
"EventTuple", [
@@ -403,7 +407,8 @@ class GithubEventProcessor(object):
# Returns empty on unhandled events
return
- self.log.debug("Handling %s event", self.event_type)
+ self.log.debug("Handling %s event with installation id %s",
+ self.event_type, installation_id)
events = []
try:
events = method()
@@ -439,7 +444,11 @@ class GithubEventProcessor(object):
# branch is now protected.
if hasattr(event, "branch") and event.branch:
protected = None
- if change:
+ # Only use the `branch_protected` flag if the
+ # target branch of change and event are the same.
+ # The base branch could have changed in the
+ # meantime.
+ if change and change.branch == event.branch:
# PR based events already have the information if the
# target branch is protected so take the information
# from there.
@@ -675,6 +684,11 @@ class GithubEventProcessor(object):
branch, project_name)
events.append(
self._branch_protection_rule_to_event(project_name, branch))
+
+ for event in events:
+ # Make sure every event has a branch cache ltime
+ self.connection.clearConnectionCacheOnBranchEvent(event)
+
return events
def _branch_protection_rule_to_event(self, project_name, branch):
@@ -2428,7 +2442,9 @@ class GithubConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
raw_annotation = {
"path": fn,
"annotation_level": annotation_level,
- "message": comment["message"],
+ "message": comment["message"].encode(
+ "utf8")[:ANNOTATION_MAX_MESSAGE_SIZE].decode(
+ "utf8", "ignore"),
"start_line": start_line,
"end_line": end_line,
"start_column": start_column,
diff --git a/zuul/driver/github/githubreporter.py b/zuul/driver/github/githubreporter.py
index 1f44303bd..396516038 100644
--- a/zuul/driver/github/githubreporter.py
+++ b/zuul/driver/github/githubreporter.py
@@ -193,13 +193,13 @@ class GithubReporter(BaseReporter):
self.log.warning('Merge mode %s not supported by Github', mode)
raise MergeFailure('Merge mode %s not supported by Github' % mode)
- merge_mode = self.merge_modes[merge_mode]
project = item.change.project.name
pr_number = item.change.number
sha = item.change.patchset
log.debug('Reporting change %s, params %s, merging via API',
item.change, self.config)
- message = self._formatMergeMessage(item.change)
+ message = self._formatMergeMessage(item.change, merge_mode)
+ merge_mode = self.merge_modes[merge_mode]
for i in [1, 2]:
try:
@@ -319,10 +319,13 @@ class GithubReporter(BaseReporter):
self.connection.unlabelPull(project, pr_number, label,
zuul_event_id=item.event)
- def _formatMergeMessage(self, change):
+ def _formatMergeMessage(self, change, merge_mode):
message = []
- if change.title:
- message.append(change.title)
+ # For squash merges we don't need to add the title to the body
+ # as it will already be set as the commit subject.
+ if merge_mode != model.MERGER_SQUASH_MERGE:
+ if change.title:
+ message.append(change.title)
if change.body_text:
message.append(change.body_text)
merge_message = "\n\n".join(message)
diff --git a/zuul/driver/gitlab/gitlabconnection.py b/zuul/driver/gitlab/gitlabconnection.py
index 1515db8df..da423f085 100644
--- a/zuul/driver/gitlab/gitlabconnection.py
+++ b/zuul/driver/gitlab/gitlabconnection.py
@@ -281,6 +281,7 @@ class GitlabAPIClient():
self.api_token = api_token
self.keepalive = keepalive
self.disable_pool = disable_pool
+ self.get_mr_wait_factor = 2
self.headers = {'Authorization': 'Bearer %s' % (
self.api_token)}
@@ -342,11 +343,36 @@ class GitlabAPIClient():
# https://docs.gitlab.com/ee/api/merge_requests.html#get-single-mr
def get_mr(self, project_name, number, zuul_event_id=None):
- path = "/projects/%s/merge_requests/%s" % (
- quote_plus(project_name), number)
- resp = self.get(self.baseurl + path, zuul_event_id=zuul_event_id)
- self._manage_error(*resp, zuul_event_id=zuul_event_id)
- return resp[0]
+ log = get_annotated_logger(self.log, zuul_event_id)
+ attempts = 0
+
+ def _get_mr():
+ path = "/projects/%s/merge_requests/%s" % (
+ quote_plus(project_name), number)
+ resp = self.get(self.baseurl + path, zuul_event_id=zuul_event_id)
+ self._manage_error(*resp, zuul_event_id=zuul_event_id)
+ return resp[0]
+
+        # The Gitlab API might not return a complete MR description, as
+        # some attributes are updated asynchronously. This loop queries
+        # the API until all async attributes are available or the retry
+        # limit is reached.
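+        # With the default get_mr_wait_factor of 2 this fetches at most
+        # five times, sleeping 2, 4, 6 and 8 seconds between attempts
+        # (roughly 20 seconds in total).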
+ while True:
+ attempts += 1
+ mr = _get_mr()
+ # The diff_refs attribute is updated asynchronously
+            if mr.get('diff_refs'):
+ return mr
+ if attempts > 4:
+ log.warning(
+                    "Fetched MR %s#%s with incomplete data" % (
+ project_name, number))
+ return mr
+ wait_delay = attempts * self.get_mr_wait_factor
+ log.info(
+                "Will retry to fetch %s#%s due to incomplete data "
+ "(in %s seconds) ..." % (project_name, number, wait_delay))
+ time.sleep(wait_delay)
# https://docs.gitlab.com/ee/api/branches.html#list-repository-branches
def get_project_branches(self, project_name, exclude_unprotected,
@@ -667,8 +693,12 @@ class GitlabConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection):
change.ref = "refs/merge-requests/%s/head" % change.number
change.branch = change.mr['target_branch']
change.is_current_patchset = (change.mr['sha'] == change.patchset)
- change.base_sha = change.mr['diff_refs'].get('base_sha')
- change.commit_id = change.mr['diff_refs'].get('head_sha')
+ change.commit_id = event.patch_number
+ diff_refs = change.mr.get("diff_refs", {})
+ if diff_refs:
+ change.base_sha = diff_refs.get('base_sha')
+ else:
+ change.base_sha = None
change.owner = change.mr['author'].get('username')
# Files changes are not part of the Merge Request data
# See api/merge_requests.html#get-single-mr-changes
diff --git a/zuul/driver/mqtt/mqttconnection.py b/zuul/driver/mqtt/mqttconnection.py
index 7f221282f..4a028ba23 100644
--- a/zuul/driver/mqtt/mqttconnection.py
+++ b/zuul/driver/mqtt/mqttconnection.py
@@ -64,6 +64,12 @@ class MQTTConnection(BaseConnection):
def onLoad(self, zk_client, component_registry):
self.log.debug("Starting MQTT Connection")
+
+ # If the connection was not loaded by a scheduler, but by e.g.
+ # zuul-web, we want to stop here.
+ if not self.sched:
+ return
+
try:
self.client.connect(
self.connection_config.get('server', 'localhost'),
@@ -76,10 +82,11 @@ class MQTTConnection(BaseConnection):
self.client.loop_start()
def onStop(self):
- self.log.debug("Stopping MQTT Connection")
- self.client.loop_stop()
- self.client.disconnect()
- self.connected = False
+ if self.connected:
+ self.log.debug("Stopping MQTT Connection")
+ self.client.loop_stop()
+ self.client.disconnect()
+ self.connected = False
def publish(self, topic, message, qos, zuul_event_id):
log = get_annotated_logger(self.log, zuul_event_id)
diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py
index 2d5c39ec3..7a4aea626 100644
--- a/zuul/driver/sql/sqlconnection.py
+++ b/zuul/driver/sql/sqlconnection.py
@@ -247,12 +247,25 @@ class DatabaseSession(object):
except sqlalchemy.orm.exc.MultipleResultsFound:
raise Exception("Multiple buildset found with uuid %s", uuid)
- def deleteBuildsets(self, cutoff):
+ def deleteBuildsets(self, cutoff, batch_size):
"""Delete buildsets before the cutoff"""
# delete buildsets updated before the cutoff
- for buildset in self.getBuildsets(updated_max=cutoff):
- self.session().delete(buildset)
+ deleted = True
+ while deleted:
+ deleted = False
+ oldest = None
+ for buildset in self.getBuildsets(
+ updated_max=cutoff, limit=batch_size):
+ deleted = True
+ if oldest is None:
+ oldest = buildset.updated
+ else:
+ oldest = min(oldest, buildset.updated)
+ self.session().delete(buildset)
+ self.session().commit()
+ if deleted:
+ self.log.info("Deleted from %s to %s", oldest, cutoff)
class SQLConnection(BaseConnection):
@@ -409,7 +422,10 @@ class SQLConnection(BaseConnection):
final = sa.Column(sa.Boolean)
held = sa.Column(sa.Boolean)
nodeset = sa.Column(sa.String(255))
- buildset = orm.relationship(BuildSetModel, backref="builds")
+ buildset = orm.relationship(BuildSetModel,
+ backref=orm.backref(
+ "builds",
+ cascade="all, delete-orphan"))
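+        # The delete-orphan cascade lets deleteBuildsets() delete a
+        # buildset row and have its builds (and, below, their artifacts,
+        # provides and build events) removed in the same transaction
+        # instead of being orphaned.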
sa.Index(self.table_prefix + 'job_name_buildset_id_idx',
job_name, buildset_id)
@@ -468,7 +484,10 @@ class SQLConnection(BaseConnection):
name = sa.Column(sa.String(255))
url = sa.Column(sa.TEXT())
meta = sa.Column('metadata', sa.TEXT())
- build = orm.relationship(BuildModel, backref="artifacts")
+ build = orm.relationship(BuildModel,
+ backref=orm.backref(
+ "artifacts",
+ cascade="all, delete-orphan"))
class ProvidesModel(Base):
__tablename__ = self.table_prefix + PROVIDES_TABLE
@@ -476,7 +495,10 @@ class SQLConnection(BaseConnection):
build_id = sa.Column(sa.Integer, sa.ForeignKey(
self.table_prefix + BUILD_TABLE + ".id"))
name = sa.Column(sa.String(255))
- build = orm.relationship(BuildModel, backref="provides")
+ build = orm.relationship(BuildModel,
+ backref=orm.backref(
+ "provides",
+ cascade="all, delete-orphan"))
class BuildEventModel(Base):
__tablename__ = self.table_prefix + BUILD_EVENTS_TABLE
@@ -486,7 +508,10 @@ class SQLConnection(BaseConnection):
event_time = sa.Column(sa.DateTime)
event_type = sa.Column(sa.String(255))
description = sa.Column(sa.TEXT())
- build = orm.relationship(BuildModel, backref="build_events")
+ build = orm.relationship(BuildModel,
+ backref=orm.backref(
+ "build_events",
+ cascade="all, delete-orphan"))
self.buildEventModel = BuildEventModel
self.zuul_build_event_table = self.buildEventModel.__table__
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index a49bbbbbf..6dbf62de0 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -1931,6 +1931,7 @@ class AnsibleJob(object):
region=node.region,
host_id=node.host_id,
external_id=getattr(node, 'external_id', None),
+ slot=node.slot,
interface_ip=node.interface_ip,
public_ipv4=node.public_ipv4,
private_ipv4=node.private_ipv4,
@@ -3632,6 +3633,10 @@ class ExecutorServer(BaseMergeServer):
log.exception('Process pool got broken')
self.resetProcessPool()
task.transient_error = True
+ except IOError:
+ log.exception('Got I/O error while updating repo %s/%s',
+ task.connection_name, task.project_name)
+ task.transient_error = True
except Exception:
log.exception('Got exception while updating repo %s/%s',
task.connection_name, task.project_name)
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 36361df11..c3d082a47 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -243,7 +243,7 @@ class PipelineManager(metaclass=ABCMeta):
and self.useDependenciesByTopic(change.project))
if (update_commit_dependencies
or update_topic_dependencies):
- self.updateCommitDependencies(change, None, event=None)
+ self.updateCommitDependencies(change, event=None)
self._change_cache[change.cache_key] = change
resolved_changes.append(change)
return resolved_changes
@@ -285,11 +285,18 @@ class PipelineManager(metaclass=ABCMeta):
return True
return False
- def isAnyVersionOfChangeInPipeline(self, change):
- # Checks any items in the pipeline
+ def isChangeRelevantToPipeline(self, change):
+ # Checks if any version of the change or its deps matches any
+ # item in the pipeline.
for change_key in self.pipeline.change_list.getChangeKeys():
if change.cache_stat.key.isSameChange(change_key):
return True
+ if isinstance(change, model.Change):
+ for dep_change_ref in change.getNeedsChanges(
+ self.useDependenciesByTopic(change.project)):
+ dep_change_key = ChangeKey.fromReference(dep_change_ref)
+ if change.cache_stat.key.isSameChange(dep_change_key):
+ return True
return False
def isChangeAlreadyInQueue(self, change, change_queue):
@@ -315,7 +322,7 @@ class PipelineManager(metaclass=ABCMeta):
to_refresh.add(item.change)
for existing_change in to_refresh:
- self.updateCommitDependencies(existing_change, None, event)
+ self.updateCommitDependencies(existing_change, event)
def reportEnqueue(self, item):
if not self.pipeline.state.disabled:
@@ -516,7 +523,8 @@ class PipelineManager(metaclass=ABCMeta):
def addChange(self, change, event, quiet=False, enqueue_time=None,
ignore_requirements=False, live=True,
- change_queue=None, history=None, dependency_graph=None):
+ change_queue=None, history=None, dependency_graph=None,
+ skip_presence_check=False):
log = get_annotated_logger(self.log, event)
log.debug("Considering adding change %s" % change)
@@ -531,7 +539,9 @@ class PipelineManager(metaclass=ABCMeta):
# If we are adding a live change, check if it's a live item
# anywhere in the pipeline. Otherwise, we will perform the
# duplicate check below on the specific change_queue.
- if live and self.isChangeAlreadyInPipeline(change):
+ if (live and
+ self.isChangeAlreadyInPipeline(change) and
+ not skip_presence_check):
log.debug("Change %s is already in pipeline, ignoring" % change)
return True
@@ -564,7 +574,7 @@ class PipelineManager(metaclass=ABCMeta):
# to date and this is a noop; otherwise, we need to refresh
# them anyway.
if isinstance(change, model.Change):
- self.updateCommitDependencies(change, None, event)
+ self.updateCommitDependencies(change, event)
with self.getChangeQueue(change, event, change_queue) as change_queue:
if not change_queue:
@@ -590,8 +600,10 @@ class PipelineManager(metaclass=ABCMeta):
log.debug("History after enqueuing changes ahead: %s", history)
if self.isChangeAlreadyInQueue(change, change_queue):
- log.debug("Change %s is already in queue, ignoring" % change)
- return True
+ if not skip_presence_check:
+ log.debug("Change %s is already in queue, ignoring",
+ change)
+ return True
cycle = []
if isinstance(change, model.Change):
@@ -625,7 +637,7 @@ class PipelineManager(metaclass=ABCMeta):
if enqueue_time:
item.enqueue_time = enqueue_time
item.live = live
- self.reportStats(item, added=True)
+ self.reportStats(item, trigger_event=event)
item.quiet = quiet
if item.live:
@@ -857,7 +869,7 @@ class PipelineManager(metaclass=ABCMeta):
self.pipeline.tenant.name][other_pipeline.name].put(
event, needs_result=False)
- def updateCommitDependencies(self, change, change_queue, event):
+ def updateCommitDependencies(self, change, event):
log = get_annotated_logger(self.log, event)
must_update_commit_deps = (
@@ -1448,16 +1460,17 @@ class PipelineManager(metaclass=ABCMeta):
item.bundle and
item.bundle.updatesConfig(tenant) and tpc is not None
):
- extra_config_files = set(tpc.extra_config_files)
- extra_config_dirs = set(tpc.extra_config_dirs)
- # Merge extra_config_files and extra_config_dirs of the
- # dependent change
- for item_ahead in item.items_ahead:
- tpc_ahead = tenant.project_configs.get(
- item_ahead.change.project.canonical_name)
- if tpc_ahead:
- extra_config_files.update(tpc_ahead.extra_config_files)
- extra_config_dirs.update(tpc_ahead.extra_config_dirs)
+ # Collect extra config files and dirs of required changes.
+ extra_config_files = set()
+ extra_config_dirs = set()
+ for merger_item in item.current_build_set.merger_items:
+ source = self.sched.connections.getSource(
+ merger_item["connection"])
+ project = source.getProject(merger_item["project"])
+ tpc = tenant.project_configs.get(project.canonical_name)
+ if tpc:
+ extra_config_files.update(tpc.extra_config_files)
+ extra_config_dirs.update(tpc.extra_config_dirs)
ready = self.scheduleMerge(
item,
@@ -1554,6 +1567,7 @@ class PipelineManager(metaclass=ABCMeta):
log.info("Dequeuing change %s because "
"it can no longer merge" % item.change)
self.cancelJobs(item)
+ quiet_dequeue = False
if item.isBundleFailing():
item.setDequeuedBundleFailing('Bundle is failing')
elif not meets_reqs:
@@ -1565,7 +1579,28 @@ class PipelineManager(metaclass=ABCMeta):
else:
msg = f'Change {clist} is needed.'
item.setDequeuedNeedingChange(msg)
- if item.live:
+ # If all the dependencies are already in the pipeline
+ # (but not ahead of this change), then we probably
+ # just added updated versions of them, possibly
+ # updating a cycle. In that case, attempt to
+ # re-enqueue this change with the updated deps.
+ if (item.live and
+ all([self.isChangeAlreadyInPipeline(c)
+ for c in needs_changes])):
+ # Try enqueue, if that succeeds, keep this dequeue quiet
+ try:
+ log.info("Attempting re-enqueue of change %s",
+ item.change)
+ quiet_dequeue = self.addChange(
+ item.change, item.event,
+ enqueue_time=item.enqueue_time,
+ quiet=True,
+ skip_presence_check=True)
+ except Exception:
+ log.exception("Unable to re-enqueue change %s "
+ "which is missing dependencies",
+ item.change)
+ if item.live and not quiet_dequeue:
try:
self.reportItem(item)
except exceptions.MergeFailure:
@@ -2197,7 +2232,7 @@ class PipelineManager(metaclass=ABCMeta):
log.error("Reporting item %s received: %s", item, ret)
return action, (not ret)
- def reportStats(self, item, added=False):
+ def reportStats(self, item, trigger_event=None):
if not self.sched.statsd:
return
try:
@@ -2236,18 +2271,21 @@ class PipelineManager(metaclass=ABCMeta):
if dt:
self.sched.statsd.timing(key + '.resident_time', dt)
self.sched.statsd.incr(key + '.total_changes')
- if added and hasattr(item.event, 'arrived_at_scheduler_timestamp'):
+ if (
+ trigger_event
+ and hasattr(trigger_event, 'arrived_at_scheduler_timestamp')
+ ):
now = time.time()
- arrived = item.event.arrived_at_scheduler_timestamp
+ arrived = trigger_event.arrived_at_scheduler_timestamp
processing = (now - arrived) * 1000
- elapsed = (now - item.event.timestamp) * 1000
+ elapsed = (now - trigger_event.timestamp) * 1000
self.sched.statsd.timing(
basekey + '.event_enqueue_processing_time',
processing)
self.sched.statsd.timing(
basekey + '.event_enqueue_time', elapsed)
self.reportPipelineTiming('event_enqueue_time',
- item.event.timestamp)
+ trigger_event.timestamp)
except Exception:
self.log.exception("Exception reporting pipeline stats")
diff --git a/zuul/merger/merger.py b/zuul/merger/merger.py
index e4688a1b7..845925bfa 100644
--- a/zuul/merger/merger.py
+++ b/zuul/merger/merger.py
@@ -595,14 +595,32 @@ class Repo(object):
log = get_annotated_logger(self.log, zuul_event_id)
repo = self.createRepoObject(zuul_event_id)
self.fetch(ref, zuul_event_id=zuul_event_id)
- if len(repo.commit("FETCH_HEAD").parents) > 1:
+ fetch_head = repo.commit("FETCH_HEAD")
+ if len(fetch_head.parents) > 1:
args = ["-s", "resolve", "FETCH_HEAD"]
log.debug("Merging %s with args %s instead of cherry-picking",
ref, args)
repo.git.merge(*args)
else:
log.debug("Cherry-picking %s", ref)
- repo.git.cherry_pick("FETCH_HEAD")
+ # Git doesn't have an option to ignore commits that are already
+ # applied to the working tree when cherry-picking, so pass the
+ # --keep-redundant-commits option, which will cause it to make an
+ # empty commit
+ repo.git.cherry_pick("FETCH_HEAD", keep_redundant_commits=True)
+
+ # If the newly applied commit is empty, it means either:
+            # 1) The commit being cherry-picked was empty, in which case
+            #    the empty commit should be kept
+ # 2) The commit being cherry-picked was already applied to the
+ # tree, in which case the empty commit should be backed out
+ head = repo.commit("HEAD")
+ parent = head.parents[0]
+ if not any(head.diff(parent)) and \
+ any(fetch_head.diff(fetch_head.parents[0])):
+ log.debug("%s was already applied. Removing it", ref)
+ self._checkout(repo, parent)
+
return repo.head.commit
def merge(self, ref, strategy=None, zuul_event_id=None):
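Taken out of the Repo class, the cherry-pick handling above behaves like this standalone GitPython sketch (repo_path and ref are hypothetical inputs; error handling omitted):

    import git

    def cherry_pick_ignoring_duplicates(repo_path, ref):
        repo = git.Repo(repo_path)
        repo.git.fetch('origin', ref)
        fetch_head = repo.commit('FETCH_HEAD')
        # --keep-redundant-commits records an empty commit instead of
        # failing when the change is already in the working tree
        repo.git.cherry_pick('FETCH_HEAD', keep_redundant_commits=True)
        head = repo.commit('HEAD')
        parent = head.parents[0]
        # An empty result from a non-empty source commit means the
        # change was already applied; back the empty commit out
        if not any(head.diff(parent)) and \
           any(fetch_head.diff(fetch_head.parents[0])):
            repo.git.checkout(parent.hexsha)
        return repo.head.commit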
@@ -704,9 +722,11 @@ class Repo(object):
ret = {}
repo = self.createRepoObject(zuul_event_id)
if branch:
- tree = repo.heads[branch].commit.tree
+ head = repo.heads[branch].commit
else:
- tree = repo.commit(commit).tree
+ head = repo.commit(commit)
+ log.debug("Getting files for %s at %s", self.local_path, head.hexsha)
+ tree = head.tree
for fn in files:
if fn in tree:
if tree[fn].type != 'blob':
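The refactored lookup reduces to a pattern like the following; a sketch under the assumption that callers want file contents keyed by path (repo_path, branch, and files are hypothetical inputs):

    import git

    def get_files(repo_path, branch, files):
        repo = git.Repo(repo_path)
        head = repo.heads[branch].commit
        tree = head.tree
        ret = {}
        for fn in files:
            if fn in tree and tree[fn].type == 'blob':
                # Read and decode the blob at this path
                ret[fn] = tree[fn].data_stream.read().decode('utf-8')
            else:
                ret[fn] = None
        return ret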
diff --git a/zuul/model.py b/zuul/model.py
index 89a835a15..e36f9d670 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -1408,6 +1408,7 @@ class Node(ConfigObject):
self.private_ipv6 = None
self.connection_port = 22
self.connection_type = None
+ self.slot = None
self._keys = []
self.az = None
self.provider = None
@@ -4676,6 +4677,37 @@ class BuildSet(zkobject.ZKObject):
return Attributes(uuid=self.uuid)
+class EventInfo:
+
+ def __init__(self):
+ self.zuul_event_id = None
+ self.timestamp = time.time()
+ self.span_context = None
+
+ @classmethod
+ def fromEvent(cls, event):
+ tinfo = cls()
+ tinfo.zuul_event_id = event.zuul_event_id
+ tinfo.timestamp = event.timestamp
+ tinfo.span_context = event.span_context
+ return tinfo
+
+ @classmethod
+ def fromDict(cls, d):
+ tinfo = cls()
+ tinfo.zuul_event_id = d["zuul_event_id"]
+ tinfo.timestamp = d["timestamp"]
+ tinfo.span_context = d["span_context"]
+ return tinfo
+
+ def toDict(self):
+ return {
+ "zuul_event_id": self.zuul_event_id,
+ "timestamp": self.timestamp,
+ "span_context": self.span_context,
+ }
+
+
class QueueItem(zkobject.ZKObject):
"""Represents the position of a Change in a ChangeQueue.
@@ -4710,7 +4742,7 @@ class QueueItem(zkobject.ZKObject):
live=True, # Whether an item is intended to be processed at all
layout_uuid=None,
_cached_sql_results={},
- event=None, # The trigger event that lead to this queue item
+ event=None, # Info about the event that led to this queue item
# Additional container for connection-specific information to be
# used by reporters throughout the lifecycle
@@ -4732,6 +4764,9 @@ class QueueItem(zkobject.ZKObject):
def new(klass, context, **kw):
obj = klass()
obj._set(**kw)
+ if COMPONENT_REGISTRY.model_api >= 13:
+ obj._set(event=obj.event and EventInfo.fromEvent(obj.event))
+
data = obj._trySerialize(context)
obj._save(context, data, create=True)
files_state = (BuildSet.COMPLETE if obj.change.files is not None
@@ -4760,10 +4795,18 @@ class QueueItem(zkobject.ZKObject):
return (tenant, pipeline, uuid)
def serialize(self, context):
- if isinstance(self.event, TriggerEvent):
- event_type = "TriggerEvent"
+ if COMPONENT_REGISTRY.model_api < 13:
+ if isinstance(self.event, TriggerEvent):
+ event_type = "TriggerEvent"
+ else:
+ event_type = self.event.__class__.__name__
else:
- event_type = self.event.__class__.__name__
+ event_type = "EventInfo"
+ if not isinstance(self.event, EventInfo):
+ # Convert our local trigger event to an EventInfo
+ # object. This will only happen on the transition to
+ # model API version 13.
+ self._set(event=EventInfo.fromEvent(self.event))
data = {
"uuid": self.uuid,
@@ -4805,14 +4848,18 @@ class QueueItem(zkobject.ZKObject):
# child objects.
self._set(uuid=data["uuid"])
- event_type = data["event"]["type"]
- if event_type == "TriggerEvent":
- event_class = (
- self.pipeline.manager.sched.connections.getTriggerEventClass(
- data["event"]["data"]["driver_name"])
- )
+ if COMPONENT_REGISTRY.model_api < 13:
+ event_type = data["event"]["type"]
+ if event_type == "TriggerEvent":
+ event_class = (
+ self.pipeline.manager.sched.connections
+ .getTriggerEventClass(
+ data["event"]["data"]["driver_name"])
+ )
+ else:
+ event_class = EventTypeIndex.event_type_mapping.get(event_type)
else:
- event_class = EventTypeIndex.event_type_mapping.get(event_type)
+ event_class = EventInfo
if event_class is None:
raise NotImplementedError(
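The gate on COMPONENT_REGISTRY.model_api implements a rolling-upgrade pattern: keep writing the old format until every component understands model API 13, then switch to the compact one. A simplified sketch of that pattern (serialize_event is a made-up name, not the real method):

    def serialize_event(event, model_api):
        if model_api < 13:
            # Old format: persist the full event under its class name
            return {"type": event.__class__.__name__,
                    "data": event.toDict()}
        # New format: persist only the EventInfo snapshot
        if not isinstance(event, EventInfo):
            event = EventInfo.fromEvent(event)
        return {"type": "EventInfo", "data": event.toDict()}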
diff --git a/zuul/model_api.py b/zuul/model_api.py
index ccb12077d..0244296dd 100644
--- a/zuul/model_api.py
+++ b/zuul/model_api.py
@@ -14,4 +14,4 @@
# When making ZK schema changes, increment this and add a record to
# doc/source/developer/model-changelog.rst
-MODEL_API = 12
+MODEL_API = 13
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index e646c09b8..cd15a878c 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -2519,9 +2519,26 @@ class Scheduler(threading.Thread):
event.span_context = tracing.getSpanContext(span)
for pipeline in tenant.layout.pipelines.values():
+ # For most kinds of dependencies, it's sufficient to check
+ # if this change is already in the pipeline, because the
+ # only way to update a dependency cycle is to update one
+ # of the changes in it. However, dependencies-by-topic
+ # can have changes added to the cycle without updating any
+ # of the existing changes in the cycle. That means in
+ # order to detect whether a new change is added to an
+ # existing cycle in the pipeline, we need to know all of
+ # the dependencies of the new change, and check if *they*
+ # are in the pipeline. Therefore, go ahead and update our
+ # dependencies here so they are available for comparison
+ # against the pipeline contents. This front-loads some
+ # work that otherwise would happen in the pipeline
+ # manager, but the result of the work goes into the change
+ # cache, so it's not wasted; it's just less parallelized.
+ if isinstance(change, Change):
+ pipeline.manager.updateCommitDependencies(change, event)
if (
pipeline.manager.eventMatches(event, change)
- or pipeline.manager.isAnyVersionOfChangeInPipeline(change)
+ or pipeline.manager.isChangeRelevantToPipeline(change)
):
self.pipeline_trigger_events[tenant.name][
pipeline.name
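The relevance test referenced above can be pictured as follows; this is an illustrative stand-in, not the real isChangeRelevantToPipeline implementation, and the dependency accessor is assumed:

    def is_change_relevant_to_pipeline(manager, change):
        # Direct hit: some version of this change is already enqueued
        if manager.isAnyVersionOfChangeInPipeline(change):
            return True
        # Dependencies-by-topic: a new change can join an existing
        # cycle without any enqueued change being updated, so also
        # check the new change's own (freshly updated) dependencies
        for dep in getattr(change, 'needs_changes', []):
            if manager.isAnyVersionOfChangeInPipeline(dep):
                return True
        return False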
diff --git a/zuul/zk/job_request_queue.py b/zuul/zk/job_request_queue.py
index 175c57b90..7c85ae95e 100644
--- a/zuul/zk/job_request_queue.py
+++ b/zuul/zk/job_request_queue.py
@@ -609,7 +609,7 @@ class JobRequestQueue(ZooKeeperSimpleBase):
self.kazoo_client.delete(lock_path, recursive=True)
except Exception:
self.log.exception(
- "Unable to delete lock %s", path)
+ "Unable to delete lock %s", lock_path)
except Exception:
self.log.exception("Error cleaning up locks %s", self)