summary refs log tree commit diff
diff options
context:
space:
mode:
authorJames E. Blair <jim@acmegating.com>2022-10-15 11:15:36 -0700
committerJames E. Blair <jim@acmegating.com>2022-10-15 14:19:10 -0700
commitec4c6264ca1694082e30cffcf54ff54ada1d8c2d (patch)
treefbf536d07ee44f5010355124233be6a572d1829c
parent7c9dbc53075501b6e29d8b0d1c2851d4ee330b5d (diff)
downloadzuul-ec4c6264ca1694082e30cffcf54ff54ada1d8c2d.tar.gz
Add JobData refresh test
We try to avoid refreshing JobData from ZK when it is not necessary (because these objects rarely change). However, a bug in the avoidance was recently discovered and in fact we have been refreshing them more than necessary.

This adds a test to catch that case, along with fixing an identical bug (the same process is used in FrozenJobs and Builds).

The fallout from these bugs may not be exceptionally large, however, since we generally avoid refreshing FrozenJobs once a build has started, and avoid refreshing Builds once they have completed, meaning these bugs may have had little opportunity to show themselves.

Change-Id: I41c3451cf2b59ec18a20f49c6daf716de7f6542e
-rw-r--r--tests/base.py2
-rw-r--r--tests/fixtures/layouts/vars.yaml10
-rw-r--r--tests/unit/test_scheduler.py71
-rw-r--r--zuul/model.py2
4 files changed, 80 insertions, 5 deletions
diff --git a/tests/base.py b/tests/base.py
index b85bce504..c814ea44c 100644
--- a/tests/base.py
+++ b/tests/base.py
@@ -5596,6 +5596,8 @@ class ZuulTestCase(BaseTestCase):
ctx = self.createZKContext(lock)
with pipeline.manager.currentContext(ctx):
pipeline.state.refresh(ctx)
+ # return the context in case the caller wants to examine iops
+ return ctx
def _logQueueStatus(self, logger, matcher, all_zk_queues_empty,
all_merge_jobs_waiting, all_builds_reported,
diff --git a/tests/fixtures/layouts/vars.yaml b/tests/fixtures/layouts/vars.yaml
index 88e6a38f7..3cac6f4bb 100644
--- a/tests/fixtures/layouts/vars.yaml
+++ b/tests/fixtures/layouts/vars.yaml
@@ -18,7 +18,14 @@
- job:
name: check-job
- run: playbooks/check.yaml
+ vars:
+ my_var: foo
+ extra-vars:
+ extra_var: bar
+
+- job:
+ name: hold-job
+ dependencies: check-job
vars:
my_var: foo
extra-vars:
@@ -29,3 +36,4 @@
check:
jobs:
- check-job
+ - hold-job
diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py
index 5d8099686..90a5eda02 100644
--- a/tests/unit/test_scheduler.py
+++ b/tests/unit/test_scheduler.py
@@ -6317,16 +6317,80 @@ For CI problems and help debugging, contact ci@example.org"""
self.useFixture(fixtures.MonkeyPatch(
'zuul.model.FrozenJob.MAX_DATA_LEN',
1))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'zuul.model.Build.MAX_DATA_LEN',
+ 1))
+ # Return some data and pause the job. We use a paused job
+ # here because there are only two times we refresh JobData:
+ # 1) A job which has not yet started its build
+ # because the waiting status may change, we refresh the FrozenJob
+ # 2) A job which is paused
+ # because the result/data may change, we refresh the Build
+ # This allows us to test that we re-use JobData instances when
+ # we are able.
A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+ self.executor_server.returnData(
+ 'check-job', A,
+ {'somedata': 'foobar',
+ 'zuul': {'pause': True}},
+ )
self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
- # Make sure we're really using JobData objects
tenant = self.scheds.first.sched.abide.tenants.get('tenant-one')
item = tenant.layout.pipelines['check'].queues[0].queue[0]
- job = item.getJobs()[0]
- self.assertTrue(isinstance(job._variables, zuul.model.JobData))
+ hold_job = item.getJobs()[1]
+
+ # Refresh the pipeline so that we can verify the JobData
+ # objects are immutable.
+ old_hold_job_variables = hold_job.variables
+ ctx = self.refreshPipelines(self.scheds.first.sched)
+ new_hold_job_variables = hold_job.variables
+
+ self.executor_server.release('check-job')
+ self.waitUntilSettled()
+ # Waiting on hold-job now
+
+ # Check the assertions on the hold job here so that the test
+ # can fail normally if they fail (it times out otherwise due
+ # to the held job).
+ self.assertEqual('hold-job', hold_job.name)
+ # Make sure we're really using JobData objects
+ self.assertTrue(isinstance(hold_job._variables, zuul.model.JobData))
+ # Make sure the same object instance is used
+ self.assertIs(old_hold_job_variables, new_hold_job_variables)
+ # Hopefully these asserts won't change much over time. If
+ # they don't they may be a good way for us to catch unintended
+ # extra read operations. If they change too much, they may
+ # not be worth keeping and we can just remove them.
+ self.assertEqual(6, ctx.cumulative_read_objects)
+ self.assertEqual(6, ctx.cumulative_read_znodes)
+ self.assertEqual(0, ctx.cumulative_write_objects)
+ self.assertEqual(0, ctx.cumulative_write_znodes)
+
+ check_job = item.getJobs()[0]
+ self.assertEqual('check-job', check_job.name)
+ self.assertTrue(isinstance(check_job._variables,
+ zuul.model.JobData))
+ check_build = item.current_build_set.getBuild('check-job')
+ self.assertTrue(isinstance(check_build._result_data,
+ zuul.model.JobData))
+
+ # Refresh the pipeline so that we can verify the JobData
+ # objects are immutable.
+ old_check_build_results = check_build.result_data
+ ctx = self.refreshPipelines(self.scheds.first.sched)
+ new_check_build_results = check_build.result_data
+
+ # Verify that we did not reload results
+ self.assertIs(old_check_build_results, new_check_build_results)
+
+ # Again check the object read counts
+ self.assertEqual(6, ctx.cumulative_read_objects)
+ self.assertEqual(6, ctx.cumulative_read_znodes)
+ self.assertEqual(0, ctx.cumulative_write_objects)
+ self.assertEqual(0, ctx.cumulative_write_znodes)
self.executor_server.hold_jobs_in_build = False
self.executor_server.release()
@@ -6334,6 +6398,7 @@ For CI problems and help debugging, contact ci@example.org"""
self.assertHistory([
dict(name='check-job', result='SUCCESS', changes='1,1'),
+ dict(name='hold-job', result='SUCCESS', changes='1,1'),
], ordered=False)
diff --git a/zuul/model.py b/zuul/model.py
index 5d3e48081..a1d354c2f 100644
--- a/zuul/model.py
+++ b/zuul/model.py
@@ -3701,7 +3701,7 @@ class Build(zkobject.ZKObject):
# The data are stored locally in this dict
data['_' + job_data_key] = job_data['data']
elif job_data['storage'] == 'offload':
- existing_job_data = getattr(self, job_data_key, None)
+ existing_job_data = getattr(self, f"_{job_data_key}", None)
if (getattr(existing_job_data, 'hash', None) ==
job_data['hash']):
# Re-use the existing object since it's the same