diff options
-rw-r--r-- | zuul/manager/__init__.py | 15
-rw-r--r-- | zuul/nodepool.py         | 18
-rw-r--r-- | zuul/scheduler.py        | 18
3 files changed, 30 insertions, 21 deletions
diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py
index 1e3478245..8a6f82e5c 100644
--- a/zuul/manager/__init__.py
+++ b/zuul/manager/__init__.py
@@ -338,14 +338,16 @@ class PipelineManager(object):
         return True
 
     def dequeueItem(self, item):
-        self.log.debug("Removing change %s from queue" % item.change)
+        log = get_annotated_logger(self.log, item.event)
+        log.debug("Removing change %s from queue", item.change)
         item.queue.dequeueItem(item)
 
     def removeItem(self, item):
+        log = get_annotated_logger(self.log, item.event)
         # Remove an item from the queue, probably because it has been
         # superseded by another change.
-        self.log.debug("Canceling builds behind change: %s "
-                       "because it is being removed." % item.change)
+        log.debug("Canceling builds behind change: %s "
+                  "because it is being removed.", item.change)
         self.cancelJobs(item)
         self.dequeueItem(item)
         self.reportStats(item)
@@ -436,7 +438,8 @@ class PipelineManager(object):
         self._executeJobs(item, jobs)
 
     def cancelJobs(self, item, prime=True):
-        self.log.debug("Cancel jobs for change %s" % item.change)
+        log = get_annotated_logger(self.log, item.event)
+        log.debug("Cancel jobs for change %s", item.change)
         canceled = False
         old_build_set = item.current_build_set
         jobs_to_cancel = item.getJobs()
@@ -448,8 +451,8 @@
                 self.sched.cancelJob(old_build_set, job)
 
         for item_behind in item.items_behind:
-            self.log.debug("Canceling jobs for change %s, behind change %s" %
-                           (item_behind.change, item.change))
+            log.debug("Canceling jobs for change %s, behind change %s",
+                      item_behind.change, item.change)
             if self.cancelJobs(item_behind, prime=prime):
                 canceled = True
         return canceled
diff --git a/zuul/nodepool.py b/zuul/nodepool.py
index 26e96f929..64450e87d 100644
--- a/zuul/nodepool.py
+++ b/zuul/nodepool.py
@@ -228,8 +228,9 @@ class Nodepool(object):
                           resources)
             self.emitStatsResources()
 
-    def returnNodeSet(self, nodeset, build=None):
-        self.log.info("Returning nodeset %s" % (nodeset,))
+    def returnNodeSet(self, nodeset, build=None, zuul_event_id=None):
+        log = get_annotated_logger(self.log, zuul_event_id)
+        log.info("Returning nodeset %s", nodeset)
         resources = defaultdict(int)
         duration = None
         project = None
@@ -242,13 +243,12 @@
                     build.build_set.item.change and
                     build.build_set.item.change.project):
                 duration = build.end_time - build.start_time
-                self.log.info("Nodeset %s with %s nodes was in use "
-                              "for %s seconds for build %s for project %s",
-                              nodeset, len(nodeset.nodes), duration, build,
-                              project)
+                log.info("Nodeset %s with %s nodes was in use "
+                         "for %s seconds for build %s for project %s",
+                         nodeset, len(nodeset.nodes), duration, build, project)
         for node in nodeset.getNodes():
             if node.lock is None:
-                self.log.error("Node %s is not locked" % (node,))
+                log.error("Node %s is not locked", node)
             else:
                 try:
                     if node.state == model.STATE_IN_USE:
@@ -257,8 +257,8 @@
                         node.state = model.STATE_USED
                         self.sched.zk.storeNode(node)
                 except Exception:
-                    self.log.exception("Exception storing node %s "
-                                       "while unlocking:" % (node,))
+                    log.exception("Exception storing node %s "
+                                  "while unlocking:", node)
         self._unlockNodes(nodeset.getNodes())
 
         # When returning a nodeset we need to update the gauges if we have a
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index cd32c1eaa..2c77e0c6a 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -1278,7 +1278,8 @@ class Scheduler(threading.Thread):
         except Exception:
             log.exception("Unable to process autohold for %s" % build)
         try:
-            self.nodepool.returnNodeSet(build.nodeset, build)
+            self.nodepool.returnNodeSet(build.nodeset, build=build,
+                                        zuul_event_id=zuul_event_id)
         except Exception:
             log.exception("Unable to return nodeset %s" % build.nodeset)
 
@@ -1335,7 +1336,8 @@
             log.warning("Build set %s is not current "
                         "for node request %s", build_set, request)
             if request.fulfilled:
-                self.nodepool.returnNodeSet(request.nodeset)
+                self.nodepool.returnNodeSet(request.nodeset,
+                                            zuul_event_id=request.event_id)
             return
         if request.job.name not in [x.name for x in build_set.item.getJobs()]:
             log.warning("Item %s does not contain job %s "
@@ -1343,14 +1345,16 @@
                         build_set.item, request.job.name, request)
             build_set.removeJobNodeRequest(request.job.name)
             if request.fulfilled:
-                self.nodepool.returnNodeSet(request.nodeset)
+                self.nodepool.returnNodeSet(request.nodeset,
+                                            zuul_event_id=request.event_id)
             return
         pipeline = build_set.item.pipeline
         if not pipeline:
             log.warning("Build set %s is not associated with a pipeline "
                         "for node request %s", build_set, request)
             if request.fulfilled:
-                self.nodepool.returnNodeSet(request.nodeset)
+                self.nodepool.returnNodeSet(request.nodeset,
+                                            zuul_event_id=request.event_id)
             return
         pipeline.manager.onNodesProvisioned(event)
 
@@ -1456,12 +1460,14 @@
                 if not was_running:
                     nodeset = buildset.getJobNodeSet(job_name)
                     if nodeset:
-                        self.nodepool.returnNodeSet(nodeset, build)
+                        self.nodepool.returnNodeSet(
+                            nodeset, build=build, zuul_event_id=item.event)
                 build.result = 'CANCELED'
             else:
                 nodeset = buildset.getJobNodeSet(job_name)
                 if nodeset:
-                    self.nodepool.returnNodeSet(nodeset)
+                    self.nodepool.returnNodeSet(
+                        nodeset, zuul_event_id=item.event)
 
             if final:
                 # If final is set make sure that the job is not resurrected