author     Zuul <zuul@review.opendev.org>             2022-07-27 21:14:13 +0000
committer  Gerrit Code Review <review@openstack.org>  2022-07-27 21:14:13 +0000
commit     ba2f4b8eac744f14df5790fc96cc8c1aff1ab0ff (patch)
tree       6fd3892e64b816e203408c436702bf6ba54fcf6a
parent     1636ecd23b5e647c16485372262aceef1f83277c (diff)
parent     d039cc1258f06891a0e9ab61ca6f6d3d25145c1d (diff)
Merge "Fix deduplicated build references"
-rw-r--r--  tests/unit/test_circular_dependencies.py  42
-rw-r--r--  zuul/model.py                              76
-rw-r--r--  zuul/zk/zkobject.py                         1
3 files changed, 115 insertions(+), 4 deletions(-)
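
This merge changes how a scheduler refreshes pipeline state when jobs have been deduplicated across a dependency cycle: BuildSet.deserialize no longer loads a Build object for a build path owned by a different buildset; it stores a lightweight BuildReference placeholder and flags the ZKContext, and PipelineState.deserialize later swaps each placeholder for the already-loaded Build that owns that path. A rough sketch of the placeholder side, using hypothetical stand-in types rather than Zuul's real ZKObject classes:

class BuildReference:
    """Marker for a build whose data is owned by another buildset."""
    def __init__(self, path):
        self._path = path


def load_builds(own_buildset_uuid, job_to_build_path, load_build):
    # Return {job_name: Build-or-BuildReference} for one buildset.
    # load_build stands in for whatever actually reads a build from
    # ZooKeeper; it is injected here to keep the sketch self-contained.
    builds = {}
    for job_name, path in job_to_build_path.items():
        # The owning buildset's UUID is encoded in the build's ZNode
        # path; only materialize builds that belong to this buildset.
        if path.split('/')[-5] == own_buildset_uuid:
            builds[job_name] = load_build(path)
        else:
            builds[job_name] = BuildReference(path)
    return builds

A deduplicated job therefore appears exactly once as a real Build, under the buildset that owns it, and as a reference everywhere else.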
diff --git a/tests/unit/test_circular_dependencies.py b/tests/unit/test_circular_dependencies.py
index 202248d09..2223008d5 100644
--- a/tests/unit/test_circular_dependencies.py
+++ b/tests/unit/test_circular_dependencies.py
@@ -1770,6 +1770,48 @@ class TestGerritCircularDependencies(ZuulTestCase):
         self.assertEqual(items[1].change.project.name, 'org/project1')
         self.assertEqual(len(items[1].current_build_set.retry_builds), 1)
 
+    @simple_layout('layouts/job-dedup-auto-shared.yaml')
+    def test_job_deduplication_multi_scheduler(self):
+        # Test that a second scheduler can correctly refresh
+        # deduplicated builds
+        self.executor_server.hold_jobs_in_build = True
+        A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A')
+        B = self.fake_gerrit.addFakeChange('org/project1', 'master', 'B')
+
+        # A <-> B
+        A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+            A.subject, B.data["url"]
+        )
+        B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format(
+            B.subject, A.data["url"]
+        )
+
+        A.addApproval('Code-Review', 2)
+        B.addApproval('Code-Review', 2)
+
+        self.fake_gerrit.addEvent(A.addApproval('Approved', 1))
+        self.fake_gerrit.addEvent(B.addApproval('Approved', 1))
+
+        self.waitUntilSettled()
+
+        app = self.createScheduler()
+        app.start()
+        self.assertEqual(len(self.scheds), 2)
+
+        # Hold the lock on the first scheduler so that only the second
+        # will act.
+        with self.scheds.first.sched.run_handler_lock:
+            self.executor_server.hold_jobs_in_build = False
+            self.executor_server.release()
+            self.waitUntilSettled(matcher=[app])
+
+        self.assertEqual(A.data['status'], 'MERGED')
+        self.assertEqual(B.data['status'], 'MERGED')
+        self.assertHistory([
+            dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"),
+            dict(name="common-job", result="SUCCESS", changes="2,1 1,1"),
+        ], ordered=False)
+
     @simple_layout('layouts/job-dedup-noop.yaml')
     def test_job_deduplication_noop(self):
         # Test that we don't deduplicate noop (there's no good reason
diff --git a/zuul/model.py b/zuul/model.py
index a0e148171..f1edf5d93 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -742,8 +742,56 @@ class PipelineState(zkobject.ZKObject):
"queues": queues,
"old_queues": old_queues,
})
+ if context.build_references:
+ self._fixBuildReferences(data, context)
+ context.build_references = False
return data
+ def _fixBuildReferences(self, data, context):
+ # Reconcile duplicate builds; if we find any BuildReference
+ # objects, look up the actual builds and replace
+ log = context.log
+ build_map = {}
+ to_replace_dicts = []
+ to_replace_lists = []
+ for queue in data['queues'] + data['old_queues']:
+ for item in queue.queue:
+ buildset = item.current_build_set
+ for build_job, build in buildset.builds.items():
+ if isinstance(build, BuildReference):
+ to_replace_dicts.append((item,
+ buildset.builds,
+ build_job,
+ build._path))
+ else:
+ build_map[build.getPath()] = build
+ for job_name, build_list in buildset.retry_builds.items():
+ for build in build_list:
+ if isinstance(build, BuildReference):
+ to_replace_lists.append((item,
+ build_list,
+ build,
+ build._path))
+ else:
+ build_map[build.getPath()] = build
+ for (item, build_dict, build_job, build_path) in to_replace_dicts:
+ orig_build = build_map.get(build_path)
+ if orig_build:
+ build_dict[build_job] = orig_build
+ else:
+ log.warning("Unable to find deduplicated build %s for %s",
+ build_path, item)
+ del build_dict[build_job]
+ for (item, build_list, build, build_path) in to_replace_lists:
+ idx = build_list.index(build)
+ orig_build = build_map.get(build_path)
+ if orig_build:
+ build_list[idx] = build_map[build_path]
+ else:
+ log.warning("Unable to find deduplicated build %s for %s",
+ build_path, item)
+ del build_list[idx]
+
def _getKnownItems(self):
items = []
for queue in (*self.old_queues, *self.queues):
@@ -3424,6 +3472,11 @@ class BuildRequest(JobRequest):
)
 
 
+class BuildReference:
+    def __init__(self, _path):
+        self._path = _path
+
+
 class Build(zkobject.ZKObject):
     """A Build is an instance of a single execution of a Job.
@@ -3852,6 +3905,13 @@ class BuildSet(zkobject.ZKObject):
         }
         return json.dumps(data, sort_keys=True).encode("utf8")
 
+    def _isMyBuild(self, build_path):
+        parts = build_path.split('/')
+        buildset_uuid = parts[-5]
+        if buildset_uuid == self.uuid:
+            return True
+        return False
+
     def deserialize(self, raw, context):
         data = super().deserialize(raw, context)
         # Set our UUID so that getPath() returns the correct path for
@@ -3944,8 +4004,12 @@ class BuildSet(zkobject.ZKObject):
                 if not build.result:
                     build.refresh(context)
             else:
-                build = Build.fromZK(
-                    context, build_path, job=job, build_set=self)
+                if not self._isMyBuild(build_path):
+                    build = BuildReference(build_path)
+                    context.build_references = True
+                else:
+                    build = Build.fromZK(
+                        context, build_path, job=job, build_set=self)
             builds[job_name] = build
 
             for retry_path in data["retry_builds"].get(job_name, []):
@@ -3954,8 +4018,12 @@ class BuildSet(zkobject.ZKObject):
                     # Retry builds never change.
                     pass
                 else:
-                    retry_build = Build.fromZK(
-                        context, retry_path, job=job, build_set=self)
+                    if not self._isMyBuild(retry_path):
+                        retry_build = BuildReference(retry_path)
+                        context.build_references = True
+                    else:
+                        retry_build = Build.fromZK(
+                            context, retry_path, job=job, build_set=self)
                 retry_builds[job_name].append(retry_build)
 
         data.update({
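
The _fixBuildReferences pass above collects the replacements first and only then mutates the build dicts and retry lists, so nothing is modified while it is being iterated, and a single map keyed by build path serves every queue in the pipeline. A condensed illustration of that resolve step, continuing the hypothetical stand-in types from the earlier sketch (real Zuul objects carry far more state):

def resolve_build_references(builds_by_buildset):
    # builds_by_buildset: list of {job_name: Build-or-BuildReference}
    # dicts, one per buildset, as produced by load_builds() above.
    # Real builds are assumed to expose getPath(), as zuul.model.Build does.
    build_map = {}
    to_replace = []
    # Pass 1: index every real build by path, remember the placeholders.
    for builds in builds_by_buildset:
        for job_name, build in builds.items():
            if isinstance(build, BuildReference):
                to_replace.append((builds, job_name, build._path))
            else:
                build_map[build.getPath()] = build
    # Pass 2: swap placeholders for the shared Build, or drop the entry
    # (with a warning in the real code) if the owner cannot be found.
    for builds, job_name, path in to_replace:
        orig_build = build_map.get(path)
        if orig_build is not None:
            builds[job_name] = orig_build
        else:
            del builds[job_name]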
diff --git a/zuul/zk/zkobject.py b/zuul/zk/zkobject.py
index aa32b8b9b..8de3f34ba 100644
--- a/zuul/zk/zkobject.py
+++ b/zuul/zk/zkobject.py
@@ -43,6 +43,7 @@ class ZKContext:
         self.cumulative_write_znodes = 0
         self.cumulative_read_bytes = 0
         self.cumulative_write_bytes = 0
+        self.build_references = False
 
     def sessionIsValid(self):
         return ((not self.lock or self.lock.is_still_valid()) and
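
_isMyBuild relies purely on the layout of the build's ZNode path: the owning buildset's UUID is the fifth component from the end, i.e. the path is assumed to end in .../buildset/<buildset-uuid>/job/<job-name>/build/<build-uuid>. The prefix in the example below is made up for illustration; only the tail layout matters:

# Hypothetical path; everything before "buildset" is illustrative.
build_path = ("/zuul/tenant/example/pipeline/gate/item/1f2e3d4c"
              "/buildset/aabbccdd/job/common-job/build/001122334455")

parts = build_path.split('/')
assert parts[-5] == "aabbccdd"       # owning buildset UUID
assert parts[-3] == "common-job"     # job name
assert parts[-1] == "001122334455"   # build UUID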