author     Zuul <zuul@review.opendev.org>             2022-03-28 23:24:45 +0000
committer  Gerrit Code Review <review@openstack.org>  2022-03-28 23:24:45 +0000
commit     bf266dc2e536ed6a5e335af02903f737102fb561 (patch)
tree       90c5c1a702fe9569c2a012f8f04206c1f13ed2ec
parent     9bd930110955438ca4b6721c39b6e685e33e1fab (diff)
parent     bc1618ed5b683b7c43072e2c26c6a19d6dc8d9f5 (diff)
download   zuul-bf266dc2e536ed6a5e335af02903f737102fb561.tar.gz
Merge "Make promote work for any pipeline manager"
-rw-r--r--  doc/source/client.rst                                     30
-rw-r--r--  releasenotes/notes/promote-check-f1ecf4826245c43d.yaml    11
-rw-r--r--  tests/unit/test_web.py                                   166
-rw-r--r--  web/src/containers/status/Change.jsx                      28
-rw-r--r--  zuul/model.py                                              7
-rw-r--r--  zuul/scheduler.py                                         79
6 files changed, 264 insertions, 57 deletions
diff --git a/doc/source/client.rst b/doc/source/client.rst
index 98eb517fa..0177c7347 100644
--- a/doc/source/client.rst
+++ b/doc/source/client.rst
@@ -146,18 +146,24 @@ Example::
Note that the format of a change ID is <number>,<patchset>.
-The promote action is used to reorder the change queue in a pipeline, by putting
-the provided changes at the top of the queue; therefore this action makes the
-most sense when performed against a dependent pipeline.
-
-The most common use case for the promote action is the need to merge an urgent
-fix when the gate pipeline has already several patches queued ahead. This is
-especially needed if there is concern that one or more changes ahead in the queue
-may fail, thus increasing the time to land for the fix; or concern that the fix
-may not pass validation if applied on top of the current patch queue in the gate.
-
-If the queue of a dependent pipeline is targeted by the promote, all the ongoing
-jobs in that queue will be canceled and restarted on top of the promoted changes.
+The promote action is used to reorder the changes in a pipeline, by
+putting the provided changes at the top of the queue.
+
+The most common use case for the promote action is the need to merge
+an urgent fix when the gate pipeline has several patches queued
+ahead. This is especially needed if there is concern that one or more
+changes ahead in the queue may fail, thus increasing the time to land
+for the fix; or concern that the fix may not pass validation if
+applied on top of the current patch queue in the gate.
+
+Any items in a dependent pipeline which have had items ahead of them
+changed will have their jobs canceled and restarted based on the new
+ordering.
+
+If items in independent pipelines are promoted, no jobs will be
+restarted, but their change queues within the pipeline will be
+re-ordered so that they will be processed first and their node request
+priorities will increase.
tenant-conf-check
^^^^^^^^^^^^^^^^^
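
As a concrete illustration of the promote action described above, a request against an independent pipeline such as check uses the same syntax as the example earlier in this document; the tenant and change IDs below are illustrative placeholders::

  zuul promote --tenant tenant-one --pipeline check --changes 12345,1 12346,1

In that case no running jobs are aborted; the promoted changes' queues are moved to the front of the pipeline and their node request priorities increase.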
diff --git a/releasenotes/notes/promote-check-f1ecf4826245c43d.yaml b/releasenotes/notes/promote-check-f1ecf4826245c43d.yaml
new file mode 100644
index 000000000..a32dba62e
--- /dev/null
+++ b/releasenotes/notes/promote-check-f1ecf4826245c43d.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ The promote administrative action now functions with all pipeline
+ managers. Previously it would only have an impact on dependent
+ pipelines, but it will now re-order change queues as well as
+ changes within any type of pipeline.
+upgrade:
+ - |
+ The promote administrative action will no longer restart jobs for
+ changes which have not been re-ordered within their change queue.
diff --git a/tests/unit/test_web.py b/tests/unit/test_web.py
index 0d0a6916d..83620320b 100644
--- a/tests/unit/test_web.py
+++ b/tests/unit/test_web.py
@@ -2213,6 +2213,172 @@ class TestTenantScopedWebApi(BaseTestWeb):
self.assertEqual(B.reported, 2)
self.assertEqual(C.data['status'], 'MERGED')
self.assertEqual(C.reported, 2)
+ self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 1)
+ self.assertEqual(len(self.history), 10)
+
+ def test_promote_no_change(self):
+ """Test that jobs are not unecessarily restarted when promoting"""
+ self.executor_server.hold_jobs_in_build = True
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
+ A.addApproval('Code-Review', 2)
+ B.addApproval('Code-Review', 2)
+ C.addApproval('Code-Review', 2)
+
+ self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+ self.fake_gerrit.addEvent(C.addApproval('Approved', 1))
+
+ self.waitUntilSettled()
+
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ items = tenant.layout.pipelines['gate'].getAllItems()
+ enqueue_times = {}
+ for item in items:
+ enqueue_times[str(item.change)] = item.enqueue_time
+
+ # REST API
+ args = {'pipeline': 'gate',
+ 'changes': ['1,1', '2,1', '3,1']}
+ authz = {'iss': 'zuul_operator',
+ 'aud': 'zuul.example.com',
+ 'sub': 'testuser',
+ 'zuul': {
+ 'admin': ['tenant-one', ],
+ },
+ 'exp': time.time() + 3600,
+ 'iat': time.time()}
+ token = jwt.encode(authz, key='NoDanaOnlyZuul',
+ algorithm='HS256')
+ req = self.post_url(
+ 'api/tenant/tenant-one/promote',
+ headers={'Authorization': 'Bearer %s' % token},
+ json=args)
+ self.assertEqual(200, req.status_code, req.text)
+ data = req.json()
+ self.assertEqual(True, data)
+
+ # ensure that enqueue times are durable
+ items = tenant.layout.pipelines['gate'].getAllItems()
+ for item in items:
+ self.assertEqual(
+ enqueue_times[str(item.change)], item.enqueue_time)
+
+ self.waitUntilSettled()
+ self.executor_server.release('.*-merge')
+ self.waitUntilSettled()
+ self.executor_server.release('.*-merge')
+ self.waitUntilSettled()
+ self.executor_server.release('.*-merge')
+ self.waitUntilSettled()
+
+ self.assertEqual(len(self.builds), 6)
+ self.assertEqual(self.builds[0].name, 'project-test1')
+ self.assertEqual(self.builds[1].name, 'project-test2')
+ self.assertEqual(self.builds[2].name, 'project-test1')
+ self.assertEqual(self.builds[3].name, 'project-test2')
+ self.assertEqual(self.builds[4].name, 'project-test1')
+ self.assertEqual(self.builds[5].name, 'project-test2')
+
+ self.assertTrue(self.builds[0].hasChanges(A))
+ self.assertFalse(self.builds[0].hasChanges(B))
+ self.assertFalse(self.builds[0].hasChanges(C))
+
+ self.assertTrue(self.builds[2].hasChanges(A))
+ self.assertTrue(self.builds[2].hasChanges(B))
+ self.assertFalse(self.builds[2].hasChanges(C))
+
+ self.assertTrue(self.builds[4].hasChanges(A))
+ self.assertTrue(self.builds[4].hasChanges(B))
+ self.assertTrue(self.builds[4].hasChanges(C))
+
+ self.executor_server.release()
+ self.waitUntilSettled()
+
+ self.assertEqual(A.data['status'], 'MERGED')
+ self.assertEqual(A.reported, 2)
+ self.assertEqual(B.data['status'], 'MERGED')
+ self.assertEqual(B.reported, 2)
+ self.assertEqual(C.data['status'], 'MERGED')
+ self.assertEqual(C.reported, 2)
+ # The promote should be a noop, so no canceled jobs
+ self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 0)
+ self.assertEqual(len(self.history), 9)
+
+ def test_promote_check(self):
+ """Test that a change can be promoted via the admin web interface"""
+ self.executor_server.hold_jobs_in_build = True
+ # Make a patch series so that we have some non-live items in
+ # the pipeline and we can make sure they are not promoted.
+ A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
+ B.setDependsOn(A, 1)
+ C = self.fake_gerrit.addFakeChange('org/project', 'master', 'C')
+ C.setDependsOn(B, 1)
+
+ self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
+ self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1))
+
+ self.waitUntilSettled()
+
+ tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
+ items = [i for i in tenant.layout.pipelines['check'].getAllItems()
+ if i.live]
+ enqueue_times = {}
+ for item in items:
+ enqueue_times[str(item.change)] = item.enqueue_time
+
+ # REST API
+ args = {'pipeline': 'check',
+ 'changes': ['2,1', '3,1']}
+ authz = {'iss': 'zuul_operator',
+ 'aud': 'zuul.example.com',
+ 'sub': 'testuser',
+ 'zuul': {
+ 'admin': ['tenant-one', ],
+ },
+ 'exp': time.time() + 3600,
+ 'iat': time.time()}
+ token = jwt.encode(authz, key='NoDanaOnlyZuul',
+ algorithm='HS256')
+ req = self.post_url(
+ 'api/tenant/tenant-one/promote',
+ headers={'Authorization': 'Bearer %s' % token},
+ json=args)
+ self.assertEqual(200, req.status_code, req.text)
+ data = req.json()
+ self.assertEqual(True, data)
+ self.waitUntilSettled()
+
+ # ensure that enqueue times are durable
+ items = [i for i in tenant.layout.pipelines['check'].getAllItems()
+ if i.live]
+ for item in items:
+ self.assertEqual(
+ enqueue_times[str(item.change)], item.enqueue_time)
+
+ # We can't reliably test for side effects in the check
+ # pipeline since the change queues are independent, so we
+ # directly examine the queues.
+ queue_items = [(item.change.number, item.live) for item in
+ tenant.layout.pipelines['check'].getAllItems()]
+ expected = [('1', False),
+ ('2', True),
+ ('1', False),
+ ('2', False),
+ ('3', True),
+ ('1', True)]
+ self.assertEqual(expected, queue_items)
+
+ self.executor_server.release('.*-merge')
+ self.waitUntilSettled()
+ self.executor_server.release()
+ self.waitUntilSettled()
+ # No jobs should be canceled
+ self.assertEqual(self.countJobResults(self.history, 'ABORTED'), 0)
+ self.assertEqual(len(self.history), 9)
def test_tenant_authorizations_override(self):
"""Test that user gets overriden tenant authz if allowed"""
diff --git a/web/src/containers/status/Change.jsx b/web/src/containers/status/Change.jsx
index be488423b..c90d6bdfd 100644
--- a/web/src/containers/status/Change.jsx
+++ b/web/src/containers/status/Change.jsx
@@ -166,7 +166,7 @@ class Change extends React.Component {
renderAdminCommands(idx) {
const { showAdminActions } = this.state
- const { queue, pipeline } = this.props
+ const { queue } = this.props
const dropdownCommands = [
<DropdownItem
key="dequeue"
@@ -179,22 +179,18 @@ class Change extends React.Component {
this.setState(() => ({ showDequeueModal: true }))
}}
>Dequeue</DropdownItem>,
+ <DropdownItem
+ key="promote"
+ icon={<AngleDoubleUpIcon style={{
+ color: 'var(--pf-global--default-color--200)',
+ }} />}
+ description="Promote this change to the top of the queue"
+ onClick={(event) => {
+ event.preventDefault()
+ this.setState(() => ({ showPromoteModal: true }))
+ }}
+ >Promote</DropdownItem>
]
- if (pipeline.manager === 'dependent') {
- dropdownCommands.push(
- <DropdownItem
- key="promote"
- icon={<AngleDoubleUpIcon style={{
- color: 'var(--pf-global--default-color--200)',
- }} />}
- description="Promote this change to the top of the queue"
- onClick={(event) => {
- event.preventDefault()
- this.setState(() => ({ showPromoteModal: true }))
- }}
- >Promote</DropdownItem>
- )
- }
return (
<Dropdown
title='Actions'
diff --git a/zuul/model.py b/zuul/model.py
index 450191df2..4fd1b65c1 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -524,6 +524,13 @@ class Pipeline(object):
self.queues.remove(queue)
queue.delete(self.manager.current_context)
+ def promoteQueue(self, queue):
+ if queue not in self.queues:
+ return
+ with self.state.activeContext(self.manager.current_context):
+ self.queues.remove(queue)
+ self.queues.insert(0, queue)
+
def getChangesInQueue(self):
changes = []
for shared_queue in self.queues:
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index 73154d322..9cc72e0f9 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -25,7 +25,7 @@ import traceback
import uuid
from contextlib import suppress
from zuul.vendor.contextlib import nullcontext
-from collections import defaultdict
+from collections import defaultdict, OrderedDict
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
@@ -1668,39 +1668,60 @@ class Scheduler(threading.Thread):
change_ids = [c.split(',') for c in event.change_ids]
items_to_enqueue = []
change_queue = None
- for shared_queue in pipeline.queues:
- if change_queue:
- break
- for item in shared_queue.queue:
- if (item.change.number == change_ids[0][0] and
- item.change.patchset == change_ids[0][1]):
- change_queue = shared_queue
- break
- if not change_queue:
- raise Exception("Unable to find shared change queue for %s" %
- event.change_ids[0])
+
+ # A list of (queue, items); the queue should be promoted
+ # within the pipeline, and the items should be promoted within
+ # the queue.
+ promote_operations = OrderedDict()
for number, patchset in change_ids:
found = False
- for item in change_queue.queue:
- if (item.change.number == number and
+ for shared_queue in pipeline.queues:
+ for item in shared_queue.queue:
+ if not item.live:
+ continue
+ if (item.change.number == number and
item.change.patchset == patchset):
- found = True
- items_to_enqueue.append(item)
+ promote_operations.setdefault(
+ shared_queue, []).append(item)
+ found = True
+ break
+ if found:
break
if not found:
- raise Exception("Unable to find %s,%s in queue %s" %
- (number, patchset, change_queue))
- for item in change_queue.queue[:]:
- if item not in items_to_enqueue:
- items_to_enqueue.append(item)
- pipeline.manager.cancelJobs(item)
- pipeline.manager.dequeueItem(item)
- for item in items_to_enqueue:
- pipeline.manager.addChange(
- item.change, event,
- enqueue_time=item.enqueue_time,
- quiet=True,
- ignore_requirements=True)
+ raise Exception("Unable to find %s,%s" % (number, patchset))
+
+ # Reverse the promote operations so that we promote the first
+ # change queue last (which will put it at the top).
+ for change_queue, items_to_enqueue in\
+ reversed(promote_operations.items()):
+ # We can leave changes in the pipeline as long as the head
+ # remains identical; as soon as we make a change, we have
+ # to re-enqueue everything behind it in order to ensure
+ # dependencies are correct.
+ head_same = True
+ for item in change_queue.queue[:]:
+ if item.live and item == items_to_enqueue[0] and head_same:
+ # We can skip this one and leave it in place
+ items_to_enqueue.pop(0)
+ continue
+ elif not item.live:
+ # Ignore this; if the actual item behind it is
+ # dequeued, it will get cleaned up eventually.
+ continue
+ if item not in items_to_enqueue:
+ items_to_enqueue.append(item)
+ head_same = False
+ pipeline.manager.cancelJobs(item)
+ pipeline.manager.dequeueItem(item)
+
+ for item in items_to_enqueue:
+ pipeline.manager.addChange(
+ item.change, item.event,
+ enqueue_time=item.enqueue_time,
+ quiet=True,
+ ignore_requirements=True)
+ # Regardless, move this shared change queue to the head.
+ pipeline.promoteQueue(change_queue)
def _doDequeueEvent(self, event):
tenant = self.abide.tenants.get(event.tenant_name)
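
To make the head-comparison logic in the promote handler above concrete, here is a toy model (not Zuul code) of the ordering it computes, using plain strings in place of queue items; live/non-live handling and the actual cancelJobs/dequeueItem/addChange calls are omitted:

# Toy illustration of the reordering: items kept in place retain their running
# jobs, everything else is cancelled and re-enqueued behind them.
def promote_order(queue, promoted):
    """Return (kept, reenqueued) for a queue and a list of promoted items."""
    to_enqueue = list(promoted)   # promoted items go first, in requested order
    kept = []
    head_same = True
    for item in queue:
        if head_same and to_enqueue and item == to_enqueue[0]:
            kept.append(to_enqueue.pop(0))   # head unchanged; jobs keep running
            continue
        if item not in to_enqueue:
            to_enqueue.append(item)          # the rest keep their relative order
        head_same = False                    # from here on, items are re-enqueued
    return kept, to_enqueue

assert promote_order(["A", "B", "C"], ["A", "C"]) == (["A"], ["C", "B"])
assert promote_order(["A", "B", "C"], ["A", "B", "C"]) == (["A", "B", "C"], [])
assert promote_order(["A", "B", "C"], ["C"]) == ([], ["C", "A", "B"])

The second assertion is the behaviour test_promote_no_change verifies: promoting changes in their existing order leaves the queue untouched and aborts nothing.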