# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import logging
import textwrap
import time
import urllib
from abc import ABCMeta, abstractmethod
from zuul import exceptions
from zuul import model
from zuul.lib.dependson import find_dependency_headers
from zuul.lib.logutil import get_annotated_logger
from zuul.lib.tarjan import strongly_connected_components
import zuul.lib.tracing as tracing
from zuul.model import (
Change, DequeueEvent, PipelineState, PipelineChangeList, QueueItem,
PipelinePostConfigEvent,
)
from zuul.zk.change_cache import ChangeKey
from zuul.zk.components import COMPONENT_REGISTRY
from zuul.zk.locks import pipeline_lock
from opentelemetry import trace
class DynamicChangeQueueContextManager(object):
def __init__(self, change_queue):
self.change_queue = change_queue
def __enter__(self):
return self.change_queue
def __exit__(self, etype, value, tb):
if self.change_queue and not self.change_queue.queue:
self.change_queue.pipeline.removeQueue(self.change_queue)
class StaticChangeQueueContextManager(object):
def __init__(self, change_queue):
self.change_queue = change_queue
def __enter__(self):
return self.change_queue
def __exit__(self, etype, value, tb):
pass
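# Managers hand out one of the two context managers above from
# getChangeQueue() so that callers can treat both queue styles
# uniformly, e.g.:
#
#     with self.getChangeQueue(change, event) as change_queue:
#         ...
#
# A dynamic queue (used by independent-style pipelines) is discarded on
# exit if it ended up empty, while a static (shared) change queue
# persists for the life of the pipeline.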
class PipelineManager(metaclass=ABCMeta):
"""Abstract Base Class for enqueing and processing Changes in a Pipeline"""
def __init__(self, sched, pipeline):
self.log = logging.getLogger("zuul.Pipeline.%s.%s" %
(pipeline.tenant.name,
pipeline.name,))
self.sched = sched
self.pipeline = pipeline
self.event_filters = []
self.ref_filters = []
# Cached dynamic layouts (layout uuid -> layout)
self._layout_cache = {}
# A small local cache to avoid hitting the ZK-based connection
# change cache for multiple hits in the same pipeline run.
self._change_cache = {}
# Current ZK context when the pipeline is locked
self.current_context = None
# The pipeline summary used by zuul-web that is updated by the
# schedulers after processing a pipeline.
self.pipeline.summary = model.PipelineSummary()
self.pipeline.summary._set(pipeline=self.pipeline)
if sched:
self.sql = sched.sql
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.pipeline.name)
@contextlib.contextmanager
def currentContext(self, ctx):
try:
self.current_context = ctx
yield
finally:
self.current_context = None
def _postConfig(self):
layout = self.pipeline.tenant.layout
# If our layout UUID already matches the UUID in ZK, we don't
# need to make any changes in ZK. But we do still need to
# update our local object pointers. Note that our local queue
# state may still be out of date after this because we skip
# the refresh.
self.buildChangeQueues(layout)
ctx = self.sched.createZKContext(None, self.log)
with self.currentContext(ctx):
if layout.uuid == PipelineState.peekLayoutUUID(self.pipeline):
self.pipeline.state = PipelineState()
self.pipeline.state._set(pipeline=self.pipeline)
self.pipeline.change_list = PipelineChangeList()
self.pipeline.change_list._set(pipeline=self.pipeline)
return
with pipeline_lock(
self.sched.zk_client, self.pipeline.tenant.name, self.pipeline.name
) as lock:
ctx = self.sched.createZKContext(lock, self.log)
with self.currentContext(ctx):
# Since the layout UUID is new, this will move queues
# to "old_queues" and refresh the pipeline state as a
# side effect.
self.pipeline.state = PipelineState.resetOrCreate(
self.pipeline, layout.uuid)
self.pipeline.change_list = PipelineChangeList.create(
self.pipeline)
event = PipelinePostConfigEvent()
self.sched.pipeline_management_events[
self.pipeline.tenant.name][self.pipeline.name].put(
event, needs_result=False)
def buildChangeQueues(self, layout):
self.log.debug("Building relative_priority queues")
change_queues = self.pipeline.relative_priority_queues
tenant = self.pipeline.tenant
layout_project_configs = layout.project_configs
for project_name, project_configs in layout_project_configs.items():
(trusted, project) = tenant.getProject(project_name)
queue_name = None
project_in_pipeline = False
for project_config in layout.getAllProjectConfigs(project_name):
project_pipeline_config = project_config.pipelines.get(
self.pipeline.name)
if not queue_name:
queue_name = project_config.queue_name
if project_pipeline_config is None:
continue
project_in_pipeline = True
if not project_in_pipeline:
continue
if not queue_name:
continue
if queue_name in change_queues:
change_queue = change_queues[queue_name]
else:
change_queue = []
change_queues[queue_name] = change_queue
self.log.debug("Created queue: %s" % queue_name)
change_queue.append(project)
self.log.debug("Added project %s to queue: %s" %
(project, queue_name))
def getSubmitAllowNeeds(self):
# Get a list of code review labels that are allowed to be
# "needed" in the submit records for a change, with respect
# to this queue. In other words, the list of review labels
# this queue itself is likely to set before submitting.
allow_needs = set()
for action_reporter in self.pipeline.success_actions:
allow_needs.update(action_reporter.getSubmitAllowNeeds())
return allow_needs
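    # An illustrative, driver-dependent sketch: if a Gerrit success
    # action votes Verified +2, that reporter's getSubmitAllowNeeds()
    # would include 'Verified', so a change whose submit records still
    # "need" Verified may be enqueued because this pipeline itself is
    # expected to supply that label.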
def eventMatches(self, event, change):
log = get_annotated_logger(self.log, event)
if event.forced_pipeline:
if event.forced_pipeline == self.pipeline.name:
log.debug("Event %s for change %s was directly assigned "
"to pipeline %s" % (event, change, self))
return True
else:
return False
for ef in self.event_filters:
match_result = ef.matches(event, change)
if match_result:
log.debug("Event %s for change %s matched %s "
"in pipeline %s" % (event, change, ef, self))
return True
else:
log.debug("Event %s for change %s does not match %s "
"in pipeline %s because %s" % (
event, change, ef, self, str(match_result)))
return False
def getNodePriority(self, item):
queue = self.pipeline.getRelativePriorityQueue(item.change.project)
items = self.pipeline.getAllItems()
items = [i for i in items
if i.change.project in queue and
i.live]
index = items.index(item)
# Quantize on a logarithmic scale so that we don't constantly
# needlessly adjust thousands of node requests.
# If we're in the top 10 changes, return the accurate number.
if index < 10:
return index
        # After 10, batch them in groups of 10 (so items 10-19 are all
# at node priority 10, 20-29 at 20, etc).
if index < 100:
return index // 10 * 10
# After 100, batch in groups of 100.
return index // 100 * 100
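    # A worked example of the quantization above (illustrative numbers):
    # an item at index 7 returns 7; at index 34 it returns 30
    # (34 // 10 * 10); at index 256 it returns 200 (256 // 100 * 100).
    # An item's priority therefore only changes when it crosses a bucket
    # boundary, which avoids rewriting thousands of node requests every
    # time the queue shifts by one position.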
def resolveChangeReferences(self, change_references):
return self.resolveChangeKeys(
[ChangeKey.fromReference(r) for r in change_references])
def resolveChangeKeys(self, change_keys):
resolved_changes = []
for key in change_keys:
change = self._change_cache.get(key.reference)
if change is None:
source = self.sched.connections.getSource(key.connection_name)
change = source.getChange(key)
if change is None:
self.log.error("Unable to resolve change from key %s", key)
if (isinstance(change, model.Change)
and change.commit_needs_changes is None):
self.updateCommitDependencies(change, None, event=None)
self._change_cache[change.cache_key] = change
resolved_changes.append(change)
return resolved_changes
def clearCache(self):
self._change_cache.clear()
def _maintainCache(self):
active_layout_uuids = set()
referenced_change_keys = set()
for item in self.pipeline.getAllItems():
if item.layout_uuid:
active_layout_uuids.add(item.layout_uuid)
if isinstance(item.change, model.Change):
referenced_change_keys.update(item.change.needs_changes)
referenced_change_keys.update(item.change.needed_by_changes)
# Clean up unused layouts in the cache
unused_layouts = set(self._layout_cache.keys()) - active_layout_uuids
if unused_layouts:
self.log.debug("Removing unused layouts %s from cache",
unused_layouts)
for uid in unused_layouts:
with contextlib.suppress(KeyError):
del self._layout_cache[uid]
# Clean up change cache
unused_keys = set(self._change_cache.keys()) - referenced_change_keys
for key in unused_keys:
with contextlib.suppress(KeyError):
del self._change_cache[key]
def isChangeAlreadyInPipeline(self, change):
# Checks live items in the pipeline
for item in self.pipeline.getAllItems():
if item.live and change.equals(item.change):
return True
return False
def isAnyVersionOfChangeInPipeline(self, change):
# Checks any items in the pipeline
for change_key in self.pipeline.change_list.getChangeKeys():
if change.cache_stat.key.isSameChange(change_key):
return True
return False
def isChangeAlreadyInQueue(self, change, change_queue):
# Checks any item in the specified change queue
for item in change_queue.queue:
if change.equals(item.change):
return True
return False
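    # The three checks above differ in scope: isChangeAlreadyInPipeline
    # compares live items against the exact change (equals() is
    # version-sensitive), isAnyVersionOfChangeInPipeline matches the
    # change identity across all recorded items regardless of patchset,
    # and isChangeAlreadyInQueue applies the exact-change comparison to
    # a single change queue.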
def refreshDeps(self, change, event):
if not isinstance(change, model.Change):
return
change_in_pipeline = False
for item in self.pipeline.getAllItems():
            if not isinstance(item.change, model.Change):
                continue
            if item.change.equals(change):
                change_in_pipeline = True
            for dep_change_ref in item.change.commit_needs_changes:
dep_change_key = ChangeKey.fromReference(dep_change_ref)
if dep_change_key.isSameChange(change.cache_stat.key):
self.updateCommitDependencies(item.change, None, event)
if change_in_pipeline:
self.updateCommitDependencies(change, None, event)
def reportEnqueue(self, item):
if not self.pipeline.state.disabled:
self.log.info("Reporting enqueue, action %s item %s" %
(self.pipeline.enqueue_actions, item))
ret = self.sendReport(self.pipeline.enqueue_actions, item)
if ret:
self.log.error("Reporting item enqueued %s received: %s" %
(item, ret))
def reportStart(self, item):
if not self.pipeline.state.disabled:
self.log.info("Reporting start, action %s item %s" %
(self.pipeline.start_actions, item))
ret = self.sendReport(self.pipeline.start_actions, item)
if ret:
self.log.error("Reporting item start %s received: %s" %
(item, ret))
def reportNormalBuildsetEnd(self, build_set, action, final, result=None):
# Report a buildset end if there are jobs or errors
if ((build_set.job_graph and len(build_set.job_graph.jobs) > 0) or
build_set.config_errors or
build_set.unable_to_merge):
self.sql.reportBuildsetEnd(build_set, action,
final, result)
def reportDequeue(self, item):
if not self.pipeline.state.disabled:
self.log.info(
"Reporting dequeue, action %s item%s",
self.pipeline.dequeue_actions,
item,
)
ret = self.sendReport(self.pipeline.dequeue_actions, item)
if ret:
self.log.error(
"Reporting item dequeue %s received: %s", item, ret
)
        # This might be called after cancelJobs, which also sets a
# non-final 'cancel' result.
self.reportNormalBuildsetEnd(item.current_build_set, 'dequeue',
final=False)
def sendReport(self, action_reporters, item, phase1=True, phase2=True):
"""Sends the built message off to configured reporters.
Takes the action_reporters and item and sends them to the
pluggable reporters.
"""
log = get_annotated_logger(self.log, item.event)
report_errors = []
if len(action_reporters) > 0:
for reporter in action_reporters:
try:
ret = reporter.report(item, phase1=phase1, phase2=phase2)
if ret:
report_errors.append(ret)
except Exception as e:
item.setReportedResult('ERROR')
log.exception("Exception while reporting")
report_errors.append(str(e))
return report_errors
def isChangeReadyToBeEnqueued(self, change, event):
return True
def enqueueChangesAhead(self, change, event, quiet, ignore_requirements,
change_queue, history=None, dependency_graph=None,
warnings=None):
return True
def enqueueChangesBehind(self, change, event, quiet, ignore_requirements,
change_queue, history=None,
dependency_graph=None):
return True
def getMissingNeededChanges(self, change, change_queue, event,
dependency_graph=None):
"""Check that all needed changes are ahead in the queue.
Return a list of any that are missing. If it is not possible
to correct the missing changes, "abort" will be true.
:returns: (abort, needed_changes)
"""
return False, []
def getFailingDependentItems(self, item, nnfi):
return None
def getItemForChange(self, change, change_queue=None):
if change_queue is not None:
items = change_queue.queue
else:
items = self.pipeline.getAllItems()
for item in items:
if item.change.equals(change):
return item
return None
def findOldVersionOfChangeAlreadyInQueue(self, change):
for item in self.pipeline.getAllItems():
if not item.live:
continue
if change.isUpdateOf(item.change):
return item
return None
def removeOldVersionsOfChange(self, change, event):
if not self.pipeline.dequeue_on_new_patchset:
return
old_item = self.findOldVersionOfChangeAlreadyInQueue(change)
if old_item:
log = get_annotated_logger(self.log, event)
log.debug("Change %s is a new version of %s, removing %s",
change, old_item.change, old_item)
self.removeItem(old_item)
def removeAbandonedChange(self, change, event):
log = get_annotated_logger(self.log, event)
log.debug("Change %s abandoned, removing." % change)
for item in self.pipeline.getAllItems():
if not item.live:
continue
if item.change.equals(change):
self.removeItem(item)
@abstractmethod
def getChangeQueue(self, change, event, existing=None):
pass
def reEnqueueItem(self, item, last_head, old_item_ahead, item_ahead_valid):
log = get_annotated_logger(self.log, item.event)
with self.getChangeQueue(item.change, item.event,
last_head.queue) as change_queue:
if change_queue:
log.debug("Re-enqueing change %s in queue %s",
item.change, change_queue)
change_queue.enqueueItem(item)
                # If the old item ahead was re-enqueued, this value will
# be true, so we should attempt to move the item back
# to where it was in case an item ahead is already
# failing.
if item_ahead_valid:
change_queue.moveItem(item, old_item_ahead)
# Get an updated copy of the layout, but if we have a
# job graph already, then keep it (our repo state and
# jobs are frozen and will now only update if the item
# ahead changes). This resumes the buildset merge
# state machine. If we have an up-to-date layout, it
# will go ahead and refresh the job graph if there
# isn't one; or it will send a new merge job if
# necessary, or it will do nothing if we're waiting on
# a merge job.
has_job_graph = bool(item.current_build_set.job_graph)
if item.live:
# Only reset the layout for live items as we don't need to
# re-create the layout in independent pipelines.
item.updateAttributes(self.current_context,
layout_uuid=None)
# If the item is no longer active, but has a job graph we
# will make sure to update it.
if item.active or has_job_graph:
self.prepareItem(item)
# Re-set build results in case any new jobs have been
# added to the tree.
for build in item.current_build_set.getBuilds():
if build.result:
item.setResult(build)
# Similarly, reset the item state.
if item.current_build_set.unable_to_merge:
item.setUnableToMerge()
if item.current_build_set.config_errors:
item.setConfigErrors(item.getConfigErrors())
if item.dequeued_needing_change:
item.setDequeuedNeedingChange()
if item.dequeued_missing_requirements:
item.setDequeuedMissingRequirements()
# It can happen that all in-flight builds have been removed
# which would lead to paused parent jobs not being resumed.
                # To prevent that, resume parent jobs if necessary.
self._resumeBuilds(item.current_build_set)
self.reportStats(item)
return True
else:
log.error("Unable to find change queue for project %s",
item.change.project)
return False
def addChange(self, change, event, quiet=False, enqueue_time=None,
ignore_requirements=False, live=True,
change_queue=None, history=None, dependency_graph=None):
log = get_annotated_logger(self.log, event)
log.debug("Considering adding change %s" % change)
history = history if history is not None else []
log.debug("History: %s", history)
# Ensure the dependency graph is created when the first change is
# processed to allow cycle detection with the Tarjan algorithm
dependency_graph = dependency_graph or collections.OrderedDict()
log.debug("Dependency graph: %s", dependency_graph)
# If we are adding a live change, check if it's a live item
# anywhere in the pipeline. Otherwise, we will perform the
# duplicate check below on the specific change_queue.
if live and self.isChangeAlreadyInPipeline(change):
log.debug("Change %s is already in pipeline, ignoring" % change)
return True
if not ignore_requirements:
for f in self.ref_filters:
if f.connection_name != change.project.connection_name:
log.debug("Filter %s skipped for change %s due "
"to mismatched connections" % (f, change))
continue
match_result = f.matches(change)
if not match_result:
log.debug("Change %s does not match pipeline "
"requirement %s because %s" % (
change, f, str(match_result)))
return False
if not self.isChangeReadyToBeEnqueued(change, event):
log.debug("Change %s is not ready to be enqueued, ignoring" %
change)
return False
# We know this change isn't in this pipeline, but it may be in
# others. If it is, then presumably its commit_needs are up
# to date and this is a noop; otherwise, we need to refresh
# them anyway.
if isinstance(change, model.Change):
self.updateCommitDependencies(change, None, event)
with self.getChangeQueue(change, event, change_queue) as change_queue:
if not change_queue:
log.debug("Unable to find change queue for "
"change %s in project %s" %
(change, change.project))
return False
warnings = []
if not self.enqueueChangesAhead(change, event, quiet,
ignore_requirements,
change_queue, history=history,
dependency_graph=dependency_graph,
warnings=warnings):
self.dequeueIncompleteCycle(change, dependency_graph, event,
change_queue)
log.debug("Failed to enqueue changes ahead of %s" % change)
if warnings:
                    self._reportNonEnqueuedItem(change_queue, change,
                                                event, warnings)
return False
log.debug("History after enqueuing changes ahead: %s", history)
if self.isChangeAlreadyInQueue(change, change_queue):
log.debug("Change %s is already in queue, ignoring" % change)
return True
cycle = []
if hasattr(change, "needs_changes"):
cycle = self.cycleForChange(change, dependency_graph, event)
if cycle and not self.canProcessCycle(change.project):
log.info("Dequeing change %s since at least one project "
"does not allow circular dependencies", change)
warnings = ["Dependency cycle detected"]
                self._reportNonEnqueuedItem(change_queue,
                                            cycle[-1], event, warnings)
return False
log.info("Adding change %s to queue %s in %s" %
(change, change_queue, self.pipeline))
if enqueue_time is None:
enqueue_time = time.time()
span_info = tracing.startSavedSpan(
'QueueItem', start_time=enqueue_time)
item = change_queue.enqueueChange(change, event,
span_info=span_info,
enqueue_time=enqueue_time)
self.updateBundle(item, change_queue, cycle)
with item.activeContext(self.current_context):
if enqueue_time:
item.enqueue_time = enqueue_time
item.live = live
self.reportStats(item, added=True)
item.quiet = quiet
if item.live:
self.reportEnqueue(item)
# Items in a dependency cycle are expected to be enqueued after
# each other. To prevent non-cycle items from being enqueued
# between items of the same cycle, enqueue items behind each item
# in the cycle once all items in the cycle are enqueued.
if all([self.isChangeAlreadyInQueue(c, change_queue)
for c in cycle]):
if cycle:
self.log.debug("Cycle complete, enqueing changes behind")
for c in cycle or [change]:
self.enqueueChangesBehind(c, event, quiet,
ignore_requirements,
change_queue, history,
dependency_graph)
zuul_driver = self.sched.connections.drivers['zuul']
tenant = self.pipeline.tenant
zuul_driver.onChangeEnqueued(
tenant, item.change, self.pipeline, event)
self.dequeueSupercededItems(item)
return True
    def _reportNonEnqueuedItem(self, change_queue, change, event, warnings):
        # Enqueue an item which otherwise cannot be enqueued in order
        # to report a message to the user.
actions = self.pipeline.failure_actions
ci = change_queue.enqueueChange(change, event)
for w in warnings:
ci.warning(w)
ci.setReportedResult('FAILURE')
# Only report the item if the project is in the current
# pipeline. Otherwise the change could be spammed by
# reports from unrelated pipelines.
if self.pipeline.tenant.layout.getProjectPipelineConfig(ci):
self.sendReport(actions, ci)
self.dequeueItem(ci)
# We don't use reportNormalBuildsetEnd here because we want to
# report even with no jobs.
self.sql.reportBuildsetEnd(ci.current_build_set,
'failure', final=True)
def cycleForChange(self, change, dependency_graph, event):
log = get_annotated_logger(self.log, event)
log.debug("Running Tarjan's algorithm on current dependencies: %s",
dependency_graph)
sccs = [s for s in strongly_connected_components(dependency_graph)
if len(s) > 1]
log.debug("Strongly connected components (cyles): %s", sccs)
for scc in sccs:
if change in scc:
log.debug("Dependency cycle detected for "
"change %s in project %s",
change, change.project)
                # A change cannot be part of multiple cycles, so we can return
return scc
return []
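    # A minimal sketch of what cycleForChange computes, assuming
    # hypothetical changes A-D where the dependency graph maps each
    # change to the changes it depends on:
    #
    #     dependency_graph = {A: [B], B: [C], C: [A], D: [A]}
    #
    # The only strongly connected component with more than one member is
    # {A, B, C}, so cycleForChange(B, ...) returns that cycle, while
    # cycleForChange(D, ...) returns [] because D merely depends on the
    # cycle without being part of it.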
def getQueueConfig(self, project):
layout = self.pipeline.tenant.layout
queue_name = None
for project_config in layout.getAllProjectConfigs(
project.canonical_name
):
if not queue_name:
queue_name = project_config.queue_name
project_pipeline_config = project_config.pipelines.get(
self.pipeline.name)
if project_pipeline_config is None:
continue
return layout.queues.get(queue_name)
def canProcessCycle(self, project):
queue_config = self.getQueueConfig(project)
if queue_config is None:
return False
return queue_config.allow_circular_dependencies
def useDependenciesByTopic(self, project):
queue_config = self.getQueueConfig(project)
if queue_config is None:
return False
return queue_config.dependencies_by_topic
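    # Both predicates above read the queue definition from the tenant
    # config.  A minimal sketch of the corresponding YAML (option names
    # per the Zuul queue documentation):
    #
    #     - queue:
    #         name: integrated
    #         allow-circular-dependencies: true
    #         dependencies-by-topic: true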
def getNonMergeableCycleChanges(self, bundle):
"""Return changes in the cycle that do not fulfill
the pipeline's ready criteria."""
return []
def updateBundle(self, item, change_queue, cycle):
if not cycle:
return
log = get_annotated_logger(self.log, item.event)
item.updateAttributes(self.current_context, bundle=model.Bundle())
# Try to find already enqueued items of this cycle, so we use
# the same bundle
for needed_change in (c for c in cycle if c is not item.change):
needed_item = self.getItemForChange(needed_change, change_queue)
if not needed_item:
continue
# Use a common bundle for the cycle
item.updateAttributes(self.current_context,
bundle=needed_item.bundle)
break
log.info("Adding cycle item %s to bundle %s", item, item.bundle)
bundle = item.bundle
bundle.add_item(item)
# Write out the updated bundle info to Zookeeper for all items
# since it may have mutated since our last write.
for bundle_item in bundle.items:
bundle_item.updateAttributes(self.current_context,
bundle=bundle)
def dequeueIncompleteCycle(self, change, dependency_graph, event,
change_queue):
log = get_annotated_logger(self.log, event)
cycle = self.cycleForChange(change, dependency_graph, event)
enqueued_cycle_items = [i for i in (self.getItemForChange(c,
change_queue)
for c in cycle) if i is not None]
if enqueued_cycle_items:
log.info("Dequeuing incomplete cycle items: %s",
enqueued_cycle_items)
for cycle_item in enqueued_cycle_items:
self.dequeueItem(cycle_item)
def dequeueItem(self, item):
log = get_annotated_logger(self.log, item.event)
log.debug("Removing change %s from queue", item.change)
        # In case an item is dequeued that doesn't have a result yet
# (success/failed/...) we report it as dequeued.
# Without this check, all items with a valid result would be reported
# twice.
if not item.current_build_set.result and item.live:
item.setReportedResult('DEQUEUED')
self.reportDequeue(item)
item.queue.dequeueItem(item)
span_attrs = {
'zuul_event_id': item.event.zuul_event_id,
}
for k, v in item.change.getSafeAttributes().toDict().items():
span_attrs['ref_' + k] = v
tracing.endSavedSpan(item.current_build_set.span_info)
tracing.endSavedSpan(item.span_info,
attributes=span_attrs)
def removeItem(self, item):
log = get_annotated_logger(self.log, item.event)
# Remove an item from the queue, probably because it has been
# superseded by another change.
log.debug("Canceling builds behind change: %s "
"because it is being removed.", item.change)
self.cancelJobs(item)
self.dequeueItem(item)
self.reportStats(item)
if item.bundle is None:
return
log.debug("Dequeueing items in bundle %s", item.bundle)
bundle_iter = (i for i in item.bundle.items if i is not item)
for bundle_item in bundle_iter:
self.cancelJobs(bundle_item)
self.dequeueItem(bundle_item)
self.reportStats(bundle_item)
def dequeueSupercededItems(self, item):
change_id = (
item.change._id() if isinstance(item.change, Change)
else None
)
for other_name in self.pipeline.supercedes:
other_pipeline = self.pipeline.tenant.layout.pipelines.get(
other_name)
if not other_pipeline:
continue
# MODEL_API: >2
if COMPONENT_REGISTRY.model_api > 2:
event = model.SupercedeEvent(
other_pipeline.tenant.name,
other_pipeline.name,
item.change.project.canonical_hostname,
item.change.project.name,
change_id,
item.change.ref)
self.sched.pipeline_trigger_events[
self.pipeline.tenant.name][other_pipeline.name
].put_supercede(event)
else:
# Note: Iterating over the pipelines w/o locking and
# refreshing them is wrong and only kept for backward
# compatibility.
found = None
for other_item in other_pipeline.getAllItems():
if (other_item.live
and other_item.change.equals(item.change)):
found = other_item
break
if found:
self.log.info("Item %s is superceded by %s, dequeuing",
found, item)
event = DequeueEvent(
other_pipeline.tenant.name,
other_pipeline.name,
item.change.project.canonical_hostname,
item.change.project.name,
change_id,
item.change.ref)
self.sched.pipeline_management_events[
self.pipeline.tenant.name][other_pipeline.name].put(
event, needs_result=False)
def updateCommitDependencies(self, change, change_queue, event):
log = get_annotated_logger(self.log, event)
# Search for Depends-On headers and find appropriate changes
log.debug(" Updating commit dependencies for %s", change)
dependencies = []
seen = set()
for match in find_dependency_headers(change.message):
log.debug(" Found Depends-On header: %s", match)
if match in seen:
continue
seen.add(match)
try:
url = urllib.parse.urlparse(match)
except ValueError:
continue
source = self.sched.connections.getSourceByHostname(
url.hostname)
if not source:
continue
log.debug(" Found source: %s", source)
dep = source.getChangeByURLWithRetry(match, event)
if dep and (not dep.is_merged) and dep not in dependencies:
log.debug(" Adding dependency: %s", dep)
dependencies.append(dep)
new_commit_needs_changes = [d.cache_key for d in dependencies]
update_attrs = dict(commit_needs_changes=new_commit_needs_changes)
# Ask the source for any tenant-specific changes (this allows
# drivers to implement their own way of collecting deps):
source = self.sched.connections.getSource(
change.project.connection_name)
if self.useDependenciesByTopic(change.project):
log.debug(" Updating topic dependencies for %s", change)
new_topic_needs_changes = []
for dep in source.getChangesByTopic(change.topic):
if dep and (not dep.is_merged):
log.debug(" Adding dependency: %s", dep)
new_topic_needs_changes.append(dep.cache_key)
update_attrs['topic_needs_changes'] = new_topic_needs_changes
source.setChangeAttributes(change, **update_attrs)
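    # For reference, commit-message dependencies take the form of
    # Depends-On footers, one URL per line (hypothetical URL):
    #
    #     Depends-On: https://review.example.org/c/other/project/+/12345
    #
    # find_dependency_headers() extracts those URLs; each one is then
    # resolved to a change through the connection source whose hostname
    # matches the URL.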
def provisionNodes(self, item):
log = item.annotateLogger(self.log)
jobs = item.findJobsToRequest(item.pipeline.tenant.semaphore_handler)
if not jobs:
return False
build_set = item.current_build_set
log.debug("Requesting nodes for change %s", item.change)
if self.sched.globals.use_relative_priority:
relative_priority = item.getNodePriority()
else:
relative_priority = 0
for job in jobs:
self._makeNodepoolRequest(log, build_set, job, relative_priority)
return True
def _makeNodepoolRequest(self, log, build_set, job, relative_priority,
alternative=0):
provider = self._getPausedParentProvider(build_set, job)
priority = self._calculateNodeRequestPriority(build_set, job)
tenant_name = build_set.item.pipeline.tenant.name
pipeline_name = build_set.item.pipeline.name
item = build_set.item
req = self.sched.nodepool.requestNodes(
build_set.uuid, job, tenant_name, pipeline_name, provider,
priority, relative_priority, event=item.event)
log.debug("Adding node request %s for job %s to item %s",
req, job, item)
build_set.setJobNodeRequestID(job.name, req.id)
if req.fulfilled:
nodeset = self.sched.nodepool.getNodeSet(req, job.nodeset)
build_set.jobNodeRequestComplete(req.job_name, nodeset)
else:
job.setWaitingStatus(f'node request: {req.id}')
def _getPausedParent(self, build_set, job):
job_graph = build_set.job_graph
if job_graph:
for parent in job_graph.getParentJobsRecursively(job.name):
build = build_set.getBuild(parent.name)
if build.paused:
return build
return None
def _getPausedParentProvider(self, build_set, job):
parent_build = self._getPausedParent(build_set, job)
if parent_build:
return build_set.getJobNodeProvider(parent_build.job.name)
return None
def _calculateNodeRequestPriority(self, build_set, job):
precedence_adjustment = 0
precedence = build_set.item.pipeline.precedence
if self._getPausedParent(build_set, job):
precedence_adjustment = -1
initial_precedence = model.PRIORITY_MAP[precedence]
return max(0, initial_precedence + precedence_adjustment)
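    # Worked example (assuming the usual convention where lower numbers
    # are served first): model.PRIORITY_MAP translates the pipeline
    # precedence ('high'/'normal'/'low') to a numeric base priority.  If
    # a paused parent build is waiting on this job, the -1 adjustment
    # moves the request one step toward the front of the line (clamped
    # at 0) so that the paused build's held resources are freed sooner.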
def _executeJobs(self, item, jobs):
log = get_annotated_logger(self.log, item.event)
log.debug("Executing jobs for change %s", item.change)
build_set = item.current_build_set
for job in jobs:
log.debug("Found job %s for change %s", job, item.change)
try:
zone = build_set.getJobNodeExecutorZone(job.name)
nodes = build_set.getJobNodeList(job.name)
self.sched.executor.execute(
job, nodes, item, self.pipeline, zone,
build_set.dependent_changes,
build_set.merger_items)
job.setWaitingStatus('executor')
except Exception:
log.exception("Exception while executing job %s "
"for change %s:", job, item.change)
try:
# If we hit an exception we don't have a build in the
                    # current item so a potentially acquired semaphore must be
# released as it won't be released on dequeue of the item.
tenant = item.pipeline.tenant
tenant.semaphore_handler.release(self.sched, item, job)
except Exception:
log.exception("Exception while releasing semaphore")
def executeJobs(self, item):
# TODO(jeblair): This should return a value indicating a job
# was executed. Appears to be a longstanding bug.
if not item.current_build_set.job_graph:
return False
jobs = item.findJobsToRun(
item.pipeline.tenant.semaphore_handler)
if jobs:
self._executeJobs(item, jobs)
def cancelJobs(self, item, prime=True):
log = get_annotated_logger(self.log, item.event)
log.debug("Cancel jobs for change %s", item.change)
canceled = False
old_build_set = item.current_build_set
jobs_to_cancel = item.getJobs()
for job in jobs_to_cancel:
self.sched.cancelJob(old_build_set, job, final=True)
# Don't reset builds for a failing bundle when it has already started
# reporting, to keep available build results. Those items will be
# reported immediately afterwards during queue processing.
if (prime and item.current_build_set.ref and not
item.didBundleStartReporting()):
# Force a dequeued result here because we haven't actually
# reported the item, but we are done with this buildset.
self.reportNormalBuildsetEnd(
item.current_build_set, 'dequeue', final=False,
result='DEQUEUED')
tracing.endSavedSpan(item.current_build_set.span_info)
item.resetAllBuilds()
for item_behind in item.items_behind:
log.debug("Canceling jobs for change %s, behind change %s",
item_behind.change, item.change)
if self.cancelJobs(item_behind, prime=prime):
canceled = True
return canceled
def _findRelevantErrors(self, item, layout):
# First collect all the config errors that are not related to the
# current item.
parent_error_keys = list(
self.pipeline.tenant.layout.loading_errors.error_keys)
for item_ahead in item.items_ahead:
parent_error_keys.extend(
                e.key for e in item_ahead.getConfigErrors())
# Then find config errors which aren't in the parent. But
# include errors in this project-branch because the error
# detection hash is imperfect and someone attempting to fix an
# error may create a near duplicate error and it would go
# undetected. Or if there are two errors and the user only
# fixes one, then they may not realize their work is
# incomplete.
relevant_errors = []
for err in layout.loading_errors.errors:
econtext = err.key.context
if ((err.key not in parent_error_keys) or
(econtext.project_name == item.change.project.name and
econtext.branch == item.change.branch)):
relevant_errors.append(err)
return relevant_errors
def _loadDynamicLayout(self, item):
log = get_annotated_logger(self.log, item.event)
# Load layout
# Late import to break an import loop
import zuul.configloader
loader = zuul.configloader.ConfigLoader(
self.sched.connections, self.sched.zk_client, self.sched.globals,
self.sched.statsd, self.sched)
log.debug("Loading dynamic layout")
(trusted_updates, untrusted_updates) = item.includesConfigUpdates()
build_set = item.current_build_set
trusted_layout = None
trusted_errors = False
untrusted_layout = None
untrusted_errors = False
try:
# First parse the config as it will land with the
# full set of config and project repos. This lets us
# catch syntax errors in config repos even though we won't
# actually run with that config.
if trusted_updates:
log.debug("Loading dynamic layout (phase 1)")
trusted_layout = loader.createDynamicLayout(
item,
build_set.files,
self.sched.ansible_manager,
include_config_projects=True,
zuul_event_id=None)
trusted_errors = len(trusted_layout.loading_errors) > 0
# Then create the config a second time but without changes
# to config repos so that we actually use this config.
if untrusted_updates:
log.debug("Loading dynamic layout (phase 2)")
untrusted_layout = loader.createDynamicLayout(
item,
build_set.files,
self.sched.ansible_manager,
include_config_projects=False,
zuul_event_id=None)
untrusted_errors = len(untrusted_layout.loading_errors) > 0
# Configuration state handling switchboard. Intentionally verbose
            # and repetitive to be exceptionally clear that we handle all
# possible cases correctly. Note we never return trusted_layout
# from a dynamic update.
# No errors found at all use dynamic untrusted layout
if (trusted_layout and not trusted_errors and
untrusted_layout and not untrusted_errors):
log.debug("Loading dynamic layout complete")
return untrusted_layout
# No errors in untrusted only layout update
elif (not trusted_layout and
untrusted_layout and not untrusted_errors):
log.debug("Loading dynamic layout complete")
return untrusted_layout
# No errors in trusted only layout update
elif (not untrusted_layout and
trusted_layout and not trusted_errors):
# We're a change to a config repo (with no untrusted
# config items ahead), so just use the current pipeline
# layout.
log.debug("Loading dynamic layout complete")
return item.queue.pipeline.tenant.layout
# Untrusted layout only works with trusted updates
elif (trusted_layout and not trusted_errors and
untrusted_layout and untrusted_errors):
log.info("Configuration syntax error in dynamic layout")
# The config is good if we include config-projects,
# but is currently invalid if we omit them. Instead
# of returning the whole error message, just leave a
# note that the config will work once the dependent
# changes land.
msg = "This change depends on a change "\
"to a config project.\n\n"
msg += textwrap.fill(textwrap.dedent("""\
The syntax of the configuration in this change has
been verified to be correct once the config project
change upon which it depends is merged, but it can not
be used until that occurs."""))
item.setConfigError(msg)
return None
# Untrusted layout is broken and trusted is broken or not set
elif untrusted_layout and untrusted_errors:
                # Find a layout loading error that matches
# the current item.change and only report
# if one is found.
relevant_errors = self._findRelevantErrors(item,
untrusted_layout)
if relevant_errors:
item.setConfigErrors(relevant_errors)
return None
log.info(
"Configuration syntax error not related to "
"change context. Error won't be reported.")
return untrusted_layout
# Trusted layout is broken
elif trusted_layout and trusted_errors:
                # Find a layout loading error that matches
# the current item.change and only report
# if one is found.
relevant_errors = self._findRelevantErrors(item,
trusted_layout)
if relevant_errors:
item.setConfigErrors(relevant_errors)
return None
log.info(
"Configuration syntax error not related to "
"change context. Error won't be reported.")
# We're a change to a config repo with errors not relevant
# to this repo. We use the pipeline layout.
return item.queue.pipeline.tenant.layout
else:
raise Exception("We have reached a configuration error that is"
"not accounted for.")
except Exception:
log.exception("Error in dynamic layout")
item.setConfigError("Unknown configuration error")
return None
def getFallbackLayout(self, item):
parent_item = item.item_ahead
if not parent_item:
return item.pipeline.tenant.layout
return self.getLayout(parent_item)
def getLayout(self, item):
log = get_annotated_logger(self.log, item.event)
layout = self._layout_cache.get(item.layout_uuid)
if layout:
log.debug("Using cached layout %s for item %s", layout.uuid, item)
return layout
if item.layout_uuid:
log.debug("Re-calculating layout for item %s", item)
layout = self._getLayout(item)
if layout:
item.updateAttributes(self.current_context,
layout_uuid=layout.uuid)
self._layout_cache[item.layout_uuid] = layout
return layout
def _getLayout(self, item):
log = get_annotated_logger(self.log, item.event)
if item.item_ahead:
if (
(item.item_ahead.live and
not item.item_ahead.current_build_set.job_graph) or
(not item.item_ahead.live and not item.item_ahead.layout_uuid)
):
# We're probably waiting on a merge job for the item ahead.
return None
# If the current change does not update the layout, use its parent.
# If the bundle doesn't update the config or the bundle updates the
# config but the current change's project is not part of the tenant
# (e.g. when dealing w/ cross-tenant cycles), use the parent layout.
if not (
item.change.updatesConfig(item.pipeline.tenant) or
(
item.bundle
and item.bundle.updatesConfig(item.pipeline.tenant)
and item.pipeline.tenant.getProject(
item.change.project.canonical_name
)[1] is not None
)
):
return self.getFallbackLayout(item)
# Else this item updates the config,
# ask the merger for the result.
build_set = item.current_build_set
if build_set.merge_state != build_set.COMPLETE:
return None
if build_set.unable_to_merge:
return self.getFallbackLayout(item)
log.debug("Preparing dynamic layout for: %s" % item.change)
start = time.time()
layout = self._loadDynamicLayout(item)
self.reportPipelineTiming('layout_generation_time', start)
return layout
def _branchesForRepoState(self, projects, tenant, items=None):
items = items or []
if all(tenant.getExcludeUnprotectedBranches(project)
for project in projects):
branches = set()
# Add all protected branches of all involved projects
for project in projects:
branches.update(
tenant.getProjectBranches(project.canonical_name))
# Additionally add all target branches of all involved items.
branches.update(item.change.branch for item in items
if hasattr(item.change, 'branch'))
# Make sure override-checkout targets are part of the repo state
for item in items:
if not item.current_build_set.job_graph:
continue
for job in item.current_build_set.job_graph.getJobs():
if job.override_checkout:
branches.add(job.override_checkout)
for p in job.required_projects.values():
if p.override_checkout:
branches.add(p.override_checkout)
branches = list(branches)
else:
branches = None
return branches
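    # Note: returning None (the case where some involved project allows
    # unprotected branches) asks the merger for an unrestricted repo
    # state, whereas a concrete list limits the repo state to the named
    # branches.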
def scheduleMerge(self, item, files=None, dirs=None):
log = item.annotateLogger(self.log)
log.debug("Scheduling merge for item %s (files: %s, dirs: %s)" %
(item, files, dirs))
build_set = item.current_build_set
# If the involved projects exclude unprotected branches we should also
# exclude them from the merge and repo state except the branch of the
# change that is tested.
tenant = item.pipeline.tenant
items = list(item.items_ahead) + [item]
if item.bundle:
items.extend(item.bundle.items)
projects = {
item.change.project for item in items
if tenant.getProject(item.change.project.canonical_name)[1]
}
branches = self._branchesForRepoState(projects=projects, tenant=tenant,
items=items)
if isinstance(item.change, model.Change):
self.sched.merger.mergeChanges(build_set.merger_items,
item.current_build_set, files, dirs,
precedence=self.pipeline.precedence,
event=item.event,
branches=branches)
else:
self.sched.merger.getRepoState(build_set.merger_items,
item.current_build_set,
precedence=self.pipeline.precedence,
event=item.event,
branches=branches)
build_set.updateAttributes(self.current_context,
merge_state=build_set.PENDING)
return False
def scheduleFilesChanges(self, item):
log = item.annotateLogger(self.log)
log.debug("Scheduling fileschanged for item %s", item)
build_set = item.current_build_set
        # If base_sha is not available, fall back to the branch
to_sha = getattr(item.change, "base_sha",
getattr(item.change, "branch", None))
self.sched.merger.getFilesChanges(
item.change.project.connection_name, item.change.project.name,
item.change.ref, to_sha, build_set=build_set,
event=item.event)
build_set.updateAttributes(self.current_context,
files_state=build_set.PENDING)
return False
def scheduleGlobalRepoState(self, item: QueueItem) -> bool:
log = item.annotateLogger(self.log)
log.info('Scheduling global repo state for item %s', item)
tenant = item.pipeline.tenant
jobs = item.current_build_set.job_graph.getJobs()
project_cnames = set()
for job in jobs:
log.debug('Processing job %s', job.name)
project_cnames.update(job.affected_projects)
log.debug('Needed projects: %s', project_cnames)
# Filter projects for ones that are already in repo state
repo_state = item.current_build_set.repo_state
connections = self.sched.connections.connections
for connection in repo_state.keys():
canonical_hostname = connections[connection].canonical_hostname
for project in repo_state[connection].keys():
canonical_project_name = canonical_hostname + '/' + project
project_cnames.discard(canonical_project_name)
if not project_cnames:
item.current_build_set.updateAttributes(
self.current_context,
repo_state_state=item.current_build_set.COMPLETE)
return True
# At this point we know we're going to request a merge job;
# set the waiting state on all the item's jobs so users know
# what we're waiting on.
for job in jobs:
job.setWaitingStatus('repo state')
projects = []
for project_cname in project_cnames:
projects.append(tenant.getProject(project_cname)[1])
branches = self._branchesForRepoState(
projects=projects, tenant=tenant, items=[item])
new_items = list()
for project in projects:
new_item = dict()
new_item['project'] = project.name
new_item['connection'] = project.connection_name
new_items.append(new_item)
# Get state for not yet tracked projects
self.sched.merger.getRepoState(items=new_items,
build_set=item.current_build_set,
event=item.event,
branches=branches)
item.current_build_set.updateAttributes(
self.current_context,
repo_state_request_time=time.time(),
repo_state_state=item.current_build_set.PENDING)
return True
def prepareItem(self, item: QueueItem) -> bool:
build_set = item.current_build_set
tenant = item.pipeline.tenant
# We always need to set the configuration of the item if it
# isn't already set.
tpc = tenant.project_configs.get(item.change.project.canonical_name)
if not build_set.ref:
with trace.use_span(tracing.restoreSpan(item.span_info)):
span_info = tracing.startSavedSpan('BuildSet')
build_set.setConfiguration(self.current_context, span_info)
# Next, if a change ahead has a broken config, then so does
# this one. Record that and don't do anything else.
if (item.item_ahead and item.item_ahead.current_build_set and
item.item_ahead.current_build_set.config_errors):
msg = "This change depends on a change "\
"with an invalid configuration.\n"
item.setConfigError(msg)
# Find our layout since the reporter will need it to
# determine if the project is in the pipeline.
self.getLayout(item)
return False
# The next section starts between 0 and 2 remote merger
# operations in parallel as needed.
ready = True
# If the project is in this tenant, fetch missing files so we
# know if it updates the config.
if tpc:
if build_set.files_state == build_set.NEW:
ready = self.scheduleFilesChanges(item)
if build_set.files_state == build_set.PENDING:
ready = False
# If this change alters config or is live, schedule merge and
# build a layout.
# If we are dealing w/ a bundle and the bundle updates config we also
# have to merge since a config change in any of the bundle's items
# applies to all items. This is, unless the current item is not part
# of this tenant (e.g. cross-tenant cycle).
if build_set.merge_state == build_set.NEW:
if item.live or item.change.updatesConfig(tenant) or (
item.bundle and
item.bundle.updatesConfig(tenant) and tpc is not None
):
extra_config_files = set(tpc.extra_config_files)
extra_config_dirs = set(tpc.extra_config_dirs)
# Merge extra_config_files and extra_config_dirs of the
# dependent change
for item_ahead in item.items_ahead:
tpc_ahead = tenant.project_configs.get(
item_ahead.change.project.canonical_name)
if tpc_ahead:
extra_config_files.update(tpc_ahead.extra_config_files)
extra_config_dirs.update(tpc_ahead.extra_config_dirs)
ready = self.scheduleMerge(
item,
files=(['zuul.yaml', '.zuul.yaml'] +
list(extra_config_files)),
dirs=(['zuul.d', '.zuul.d'] +
list(extra_config_dirs)))
if build_set.merge_state == build_set.PENDING:
ready = False
# If a merger op is outstanding, we're not ready.
if not ready:
return False
# If the change can not be merged or has config errors, don't
# run jobs.
if build_set.unable_to_merge or build_set.config_errors:
# Find our layout since the reporter will need it to
# determine if the project is in the pipeline.
self.getLayout(item)
return False
# With the merges done, we have the info needed to get a
# layout. This may return the pipeline layout, a layout from
# a change ahead, a newly generated layout for this change, or
# None if there was an error that makes the layout unusable.
# In the last case, it will have set the config_errors on this
# item, which may be picked up by the next item.
if not (item.layout_uuid or item.current_build_set.job_graph):
layout = self.getLayout(item)
if not layout:
return False
# We don't need to build a job graph for a non-live item, we
# just need the layout.
if not item.live:
return False
# At this point we have a layout for the item, and it's live,
# so freeze the job graph.
log = item.annotateLogger(self.log)
if not item.current_build_set.job_graph:
try:
log.debug("Freezing job graph for %s" % (item,))
start = time.time()
item.freezeJobGraph(self.getLayout(item),
self.current_context,
skip_file_matcher=False,
redact_secrets_and_keys=False)
self.reportPipelineTiming('job_freeze_time', start)
except Exception as e:
# TODOv3(jeblair): nicify this exception as it will be reported
log.exception("Error freezing job graph for %s" % (item,))
item.setConfigError("Unable to freeze job graph: %s" %
(str(e)))
return False
if (item.current_build_set.job_graph and
len(item.current_build_set.job_graph.jobs) > 0):
self.sql.reportBuildsetStart(build_set)
        # At this point we know all frozen jobs and their repos, so
        # update the repo state with any missing repos.
if build_set.repo_state_state == build_set.NEW:
self.scheduleGlobalRepoState(item)
if build_set.repo_state_state == build_set.PENDING:
return False
item.deduplicateJobs(log)
return True
def _processOneItem(self, item, nnfi):
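        """Process a single queue item.

        Returns a (changed, nnfi) tuple: 'changed' is True if the item
        or its queue changed in a way the caller must act upon, and
        'nnfi' is the nearest non-failing item, which processQueue()
        threads through successive calls so items behind a failure can
        be reset onto the correct base.
        """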
log = item.annotateLogger(self.log)
changed = False
ready = False
dequeued = False
failing_reasons = [] # Reasons this item is failing
item_ahead = item.item_ahead
if item_ahead and (not item_ahead.live):
item_ahead = None
change_queue = item.queue
if COMPONENT_REGISTRY.model_api > 3:
# This sets a QueueItem flag which is only understood by
# api 4.
meets_reqs = self.isChangeReadyToBeEnqueued(
item.change, item.event)
else:
meets_reqs = True
abort, needs_changes = self.getMissingNeededChanges(
item.change, change_queue, item.event)
if not (meets_reqs and not needs_changes):
            # It's not okay to keep this change enqueued; remove it.
log.info("Dequeuing change %s because "
"it can no longer merge" % item.change)
self.cancelJobs(item)
if item.isBundleFailing():
item.setDequeuedBundleFailing()
elif not meets_reqs:
item.setDequeuedMissingRequirements()
else:
clist = ', '.join([c.url for c in needs_changes])
if len(needs_changes) > 1:
msg = f'Changes {clist} are needed.'
else:
msg = f'Change {clist} is needed.'
item.setDequeuedNeedingChange(msg)
if item.live:
try:
self.reportItem(item)
except exceptions.MergeFailure:
pass
self.dequeueItem(item)
return (True, nnfi)
actionable = change_queue.isActionable(item)
item.updateAttributes(self.current_context, active=actionable)
dep_items = self.getFailingDependentItems(item, nnfi)
if dep_items:
failing_reasons.append('a needed change is failing')
self.cancelJobs(item, prime=False)
else:
item_ahead_merged = False
if (item_ahead and
hasattr(item_ahead.change, 'is_merged') and
item_ahead.change.is_merged):
item_ahead_merged = True
if (item_ahead != nnfi and not item_ahead_merged):
# Our current base is different than what we expected,
# and it's not because our current base merged. Something
# ahead must have failed.
log.info("Resetting builds for change %s because the "
"item ahead, %s, is not the nearest non-failing "
"item, %s" % (item.change, item_ahead, nnfi))
change_queue.moveItem(item, nnfi)
changed = True
self.cancelJobs(item)
if actionable:
ready = self.prepareItem(item)
            # Report the start of jobs only once, and only if there are
            # jobs to run for this item.
if ready and len(self.pipeline.start_actions) > 0 \
and len(item.current_build_set.job_graph.jobs) > 0 \
and not item.reported_start \
and not item.quiet:
self.reportStart(item)
item.updateAttributes(self.current_context,
reported_start=True)
if item.current_build_set.unable_to_merge:
failing_reasons.append("it has a merge conflict")
if item.current_build_set.config_errors:
failing_reasons.append("it has an invalid configuration")
if ready and self.provisionNodes(item):
changed = True
if ready and item.bundle and item.didBundleFinish():
            # Since the bundle finished, we need to check whether any
            # item can report. If so, the queue needs to be processed
            # again.
changed = changed or any(
i.item_ahead is None for i in item.bundle.items)
if ready and self.executeJobs(item):
changed = True
if item.hasAnyJobFailed():
failing_reasons.append("at least one job failed")
if (not item.live) and (not item.items_behind) and (not dequeued):
failing_reasons.append("is a non-live item with no items behind")
self.dequeueItem(item)
changed = dequeued = True
can_report = not item_ahead and item.areAllJobsComplete() and item.live
if can_report and item.bundle:
can_report = can_report and (
item.isBundleFailing() or item.didBundleFinish()
)
# Before starting to merge the cycle items, make sure they
# can still be merged, to reduce the chance of a partial merge.
if can_report and not item.bundle.started_reporting:
non_mergeable_cycle_changes = self.getNonMergeableCycleChanges(
item.bundle)
if non_mergeable_cycle_changes:
clist = ', '.join([
c.url for c in non_mergeable_cycle_changes])
                    if len(non_mergeable_cycle_changes) > 1:
                        msg = f'Changes {clist} cannot be merged.'
                    else:
                        msg = f'Change {clist} cannot be merged.'
item.bundle.cannot_merge = msg
failing_reasons.append("cycle can not be merged")
log.debug(
"Dequeuing item %s because cycle can no longer merge",
item
)
item.bundle.started_reporting = can_report
if can_report:
# If we're starting to report a successful bundle, enable
# two-phase reporting. Report the first phase for every item
# in the bundle, then the second.
phase1 = True
phase2 = True
if (self.changes_merge
and item.bundle
and (not item.cannotMergeBundle())
and (not item.isBundleFailing())):
for i in item.bundle.items:
if not i.reported:
self.log.debug("Report phase1 for bundle item %s", i)
self.reportItem(i, phase1=True, phase2=False)
phase1 = False
try:
self.reportItem(item, phase1=phase1, phase2=phase2)
except exceptions.MergeFailure:
failing_reasons.append("it did not merge")
for item_behind in item.items_behind:
log.info("Resetting builds for change %s because the "
"item ahead, %s, failed to merge" %
(item_behind.change, item))
self.cancelJobs(item_behind)
# Only re-report items in the cycle when we encounter a merge
# failure for a successful bundle.
if (item.bundle and not (
item.isBundleFailing() or item.cannotMergeBundle())):
item.bundle.failed_reporting = True
self.reportProcessedBundleItems(item)
self.dequeueItem(item)
changed = dequeued = True
elif not failing_reasons and item.live:
nnfi = item
if not dequeued:
item.current_build_set.updateAttributes(
self.current_context, failing_reasons=failing_reasons)
if failing_reasons:
log.debug("%s is a failing item because %s" %
(item, failing_reasons))
if (item.live and not dequeued
and self.sched.globals.use_relative_priority):
priority = item.getNodePriority()
for request_id in item.current_build_set.node_requests.values():
node_request = self.sched.nodepool.zk_nodepool.getNodeRequest(
request_id, cached=True)
if not node_request:
continue
if node_request.state != model.STATE_REQUESTED:
# If the node request was locked and accepted by a
# provider, we can no longer update the relative priority.
continue
if node_request.relative_priority != priority:
self.sched.nodepool.reviseRequest(
node_request, priority)
return (changed, nnfi)
def reportProcessedBundleItems(self, item):
"""Report failure to already reported bundle items.
In case we encounter e.g. a merge failure when we already successfully
reported some items, we need to go back and report again.
"""
reported_items = [i for i in item.bundle.items if i.reported]
actions = self.pipeline.failure_actions
for ri in reported_items:
self.sendReport(actions, ri)
if ri is not item:
# Don't override the reported sql result for the item
# that "really" failed.
ri.setReportedResult('FAILURE')
self.reportNormalBuildsetEnd(ri.current_build_set,
'failure', final=True)
def processQueue(self):
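        """Run one processing pass over every queue in this pipeline.

        Returns True if any item changed; roughly speaking, a True
        return tells the caller that another pass may be needed before
        the pipeline settles.
        """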
# Do whatever needs to be done for each change in the queue
self.log.debug("Starting queue processor: %s" % self.pipeline.name)
changed = False
change_keys = set()
for queue in self.pipeline.queues[:]:
queue_changed = False
nnfi = None # Nearest non-failing item
for item in queue.queue[:]:
item_changed, nnfi = self._processOneItem(
item, nnfi)
if item_changed:
queue_changed = True
self.reportStats(item)
change_keys.add(item.change.cache_stat.key)
if queue_changed:
changed = True
status = ''
for item in queue.queue:
status += item.formatStatus()
if status:
self.log.debug("Queue %s status is now:\n %s" %
(queue.name, status))
self.pipeline.change_list.setChangeKeys(
self.pipeline.manager.current_context,
change_keys)
self._maintainCache()
self.log.debug("Finished queue processor: %s (changed: %s)" %
(self.pipeline.name, changed))
return changed
def onBuildStarted(self, build):
log = get_annotated_logger(self.log, build.zuul_event_id)
log.debug("Build %s started", build)
self.sql.reportBuildStart(build)
self.reportPipelineTiming('job_wait_time',
build.execute_time, build.start_time)
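        # Note: given reportPipelineTiming() below, job_wait_time works
        # out to (start_time - execute_time) in milliseconds, i.e.
        # roughly how long the build waited between being submitted to
        # an executor and actually starting.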
if not build.build_set.item.first_job_start_time:
# Only report this for the first job in a queue item so
# that we don't include gate resets.
build.build_set.item.updateAttributes(
self.current_context,
first_job_start_time=build.start_time)
self.reportPipelineTiming('event_job_time',
build.build_set.item.event.timestamp,
build.start_time)
return True
def onBuildPaused(self, build):
log = get_annotated_logger(self.log, build.zuul_event_id)
item = build.build_set.item
log.debug("Build %s of %s paused", build, item.change)
item.setResult(build)
        # Try to resume builds right away: the paused build may have no
        # child jobs, or its children may all be skipped already.
self._resumeBuilds(build.build_set)
return True
def _resumeBuilds(self, build_set):
"""
Resumes all paused builds of a buildset that may be resumed.
"""
job_graph = build_set.job_graph
for build in build_set.builds.values():
if not build.paused:
continue
# check if all child jobs are finished
child_builds = [build_set.builds.get(x.name) for x in
job_graph.getDependentJobsRecursively(
build.job.name)]
all_completed = True
for child_build in child_builds:
if not child_build or not child_build.result:
all_completed = False
break
if all_completed:
self.sched.executor.resumeBuild(build)
build.updateAttributes(
build_set.item.pipeline.manager.current_context,
paused=False)
def _resetDependentBuilds(self, build_set, build):
job_graph = build_set.job_graph
for job in job_graph.getDependentJobsRecursively(build.job.name):
self.sched.cancelJob(build_set, job)
build = build_set.getBuild(job.name)
if build:
build_set.removeBuild(build)
        # Re-set build results in case we reset builds that were
        # skipped by something other than this build.
for build in build_set.getBuilds():
if build.result:
build_set.item.setResult(build)
def _cancelRunningBuilds(self, build_set):
item = build_set.item
for job in item.getJobs():
build = build_set.getBuild(job.name)
if not build or not build.result:
self.sched.cancelJob(build_set, job, final=True)
def onBuildCompleted(self, build):
log = get_annotated_logger(self.log, build.zuul_event_id)
item = build.build_set.item
log.debug("Build %s of %s completed" % (build, item.change))
item.pipeline.tenant.semaphore_handler.release(
self.sched, item, build.job)
if item.getJob(build.job.name) is None:
log.info("Build %s no longer in job graph for item %s",
build, item)
return
# If the build was for deduplicated jobs, apply the results to
# all the items that use this build.
build_in_items = [item]
if item.bundle:
for other_item in item.bundle.items:
if other_item in build_in_items:
continue
other_build = other_item.current_build_set.getBuild(
build.job.name)
if other_build is not None and other_build is build:
build_in_items.append(other_item)
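        # At this point build_in_items holds the item that owns this
        # build plus every bundle item whose current build set
        # references the same Build object (i.e. items sharing a
        # deduplicated job within the bundle).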
for item in build_in_items:
# We don't care about some actions below if this build
# isn't in the current buildset, so determine that before
# it is potentially removed with setResult.
if item.current_build_set.getBuild(build.job.name) is not build:
current = False
else:
current = True
item.setResult(build)
log.debug("Item %s status is now:\n %s", item, item.formatStatus())
if not current:
continue
build_set = item.current_build_set
if build.retry:
if build_set.getJobNodeSetInfo(build.job.name):
build_set.removeJobNodeSetInfo(build.job.name)
                # In case this was a paused build, we need to retry all
                # child jobs.
self._resetDependentBuilds(build_set, build)
self._resumeBuilds(build_set)
if (build_set.fail_fast and
build.failed and build.job.voting and not build.retry):
                # If fail-fast is set and the build was not successful,
                # cancel all remaining jobs.
log.debug("Build %s failed and fail-fast enabled, canceling "
"running builds", build)
self._cancelRunningBuilds(build_set)
return True
def onFilesChangesCompleted(self, event, build_set):
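        """Handle completion of a files-changes merger operation.

        Records the changed files on the change and, if the merge half
        of the files/merge pair has already finished, reports the
        combined merge_request_time stat (mirroring the matching branch
        in onMergeCompleted() below).
        """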
item = build_set.item
source = self.sched.connections.getSource(
item.change.project.connection_name)
source.setChangeAttributes(item.change, files=event.files)
build_set.updateAttributes(self.current_context,
files_state=build_set.COMPLETE)
if build_set.merge_state == build_set.COMPLETE:
            # We're the second of the files/merge pair; report the stat.
self.reportPipelineTiming('merge_request_time',
build_set.configured_time)
if event.elapsed_time:
self.reportPipelineTiming('merger_files_changes_op_time',
event.elapsed_time, elapsed=True)
def onMergeCompleted(self, event, build_set):
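        """Handle a merge-completed event for a build set.

        The dispatch below relies on ordering: the first merge event
        for a build set is the initial merge (_onMergeCompleted); once
        merge_state is COMPLETE, a later merge event can only be the
        global repo state result (_onGlobalRepoStateCompleted).
        """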
if build_set.merge_state == build_set.COMPLETE:
self._onGlobalRepoStateCompleted(event, build_set)
self.reportPipelineTiming('repo_state_time',
build_set.repo_state_request_time)
if event.elapsed_time:
self.reportPipelineTiming('merger_repo_state_op_time',
event.elapsed_time, elapsed=True)
else:
self._onMergeCompleted(event, build_set)
if build_set.files_state == build_set.COMPLETE:
            # We're the second of the files/merge pair; report the stat.
self.reportPipelineTiming('merge_request_time',
build_set.configured_time)
if event.elapsed_time:
self.reportPipelineTiming('merger_merge_op_time',
event.elapsed_time, elapsed=True)
def _onMergeCompleted(self, event, build_set):
item = build_set.item
source = self.sched.connections.getSource(
item.change.project.connection_name)
if isinstance(item.change, model.Tag):
source.setChangeAttributes(
item.change, containing_branches=event.item_in_branches)
with build_set.activeContext(self.current_context):
build_set.setMergeRepoState(event.repo_state)
build_set.merge_state = build_set.COMPLETE
if event.merged:
build_set.commit = event.commit
items_ahead = item.getNonLiveItemsAhead()
for index, item in enumerate(items_ahead):
if item.current_build_set.files:
continue
with item.current_build_set.activeContext(
self.current_context):
item.current_build_set.setFiles(
event.files[:index + 1])
# An earlier merge job may have supplied our files
if not build_set.files:
build_set.setFiles(event.files)
elif event.updated:
build_set.commit = (item.change.newrev or
'0000000000000000000000000000000000000000')
if not build_set.commit:
self.log.info("Unable to merge change %s" % item.change)
item.setUnableToMerge(event.errors)
def _onGlobalRepoStateCompleted(self, event, build_set):
item = build_set.item
if not event.updated:
self.log.info("Unable to get global repo state for change %s"
% item.change)
item.setUnableToMerge(event.errors)
else:
self.log.info("Received global repo state for change %s"
% item.change)
with build_set.activeContext(self.current_context):
build_set.setExtraRepoState(event.repo_state)
build_set.repo_state_state = build_set.COMPLETE
def _handleNodeRequestFallback(self, log, build_set, job, old_request):
if len(job.nodeset_alternatives) <= job.nodeset_index + 1:
# No alternatives to fall back upon
return False
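        # For illustration (hypothetical data): a job with
        # nodeset_alternatives = [nodeset_a, nodeset_b] and
        # nodeset_index = 0 whose request for nodeset_a fails is
        # retried below with nodeset_index = 1 (nodeset_b); a second
        # failure then has no alternative left and the check above
        # returns False.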
# Increment the nodeset index and remove the old request
with job.activeContext(self.current_context):
job.nodeset_index = job.nodeset_index + 1
log.info("Re-attempting node request for job "
f"{job.name} of item {build_set.item} "
f"with nodeset alternative {job.nodeset_index}")
build_set.removeJobNodeRequestID(job.name)
# Make a new request
if self.sched.globals.use_relative_priority:
relative_priority = build_set.item.getNodePriority()
else:
relative_priority = 0
log = build_set.item.annotateLogger(self.log)
self._makeNodepoolRequest(log, build_set, job, relative_priority)
return True
def onNodesProvisioned(self, request, nodeset, build_set):
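        """Handle the completion of a node request.

        A failed request is first retried with a fallback nodeset if
        one is configured (see _handleNodeRequestFallback() above);
        once no fallback remains, a fake build is put through the
        result cycle so the failure is recorded and reported like any
        other build.
        """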
log = get_annotated_logger(self.log, request.event_id)
self.reportPipelineTiming('node_request_time', request.created_time)
job = build_set.item.getJob(request.job_name)
# First see if we need to retry the request
if not request.fulfilled:
log.info("Node request %s: failure for %s",
request, request.job_name)
if self._handleNodeRequestFallback(log, build_set, job, request):
return
# No more fallbacks -- tell the buildset the request is complete
if nodeset is not None:
build_set.jobNodeRequestComplete(request.job_name, nodeset)
# Put a fake build through the cycle to clean it up.
if not request.fulfilled:
fakebuild = build_set.item.setNodeRequestFailure(job)
try:
self.sql.reportBuildEnd(
fakebuild, tenant=build_set.item.pipeline.tenant.name,
final=True)
except Exception:
log.exception("Error reporting build completion to DB:")
self._resumeBuilds(build_set)
tenant = build_set.item.pipeline.tenant
tenant.semaphore_handler.release(
self.sched, build_set.item, job)
log.info("Completed node request %s for job %s of item %s "
"with nodes %s",
request, request.job_name, build_set.item, request.nodes)
def reportItem(self, item, phase1=True, phase2=True):
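        """Report the item to this pipeline's reporters.

        A sketch of the two-phase scheme used for bundles: phase1 sends
        the reports themselves, while phase2 performs the post-report
        handling (merge verification and window adjustment) for
        pipelines that merge changes. Callers report phase1 for every
        bundle item first, then phase2, to reduce the chance of a
        partially reported cycle.
        """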
log = get_annotated_logger(self.log, item.event)
action = None
already_reported = item.reported
if phase2 and not phase1:
already_reported = False
if not already_reported:
action, reported = self._reportItem(item, phase1, phase2)
item.updateAttributes(self.current_context,
reported=reported)
if not phase2:
return
if self.changes_merge:
succeeded = item.didAllJobsSucceed() and not item.isBundleFailing()
merged = item.reported
source = item.change.project.source
if merged:
merged = source.isMerged(item.change, item.change.branch)
if action:
if action == 'success' and not merged:
log.debug("Overriding result for %s to merge failure",
item.change)
action = 'merge-failure'
item.setReportedResult('MERGE_FAILURE')
self.reportNormalBuildsetEnd(item.current_build_set,
action, final=True)
change_queue = item.queue
if not (succeeded and merged):
if (not item.current_build_set.job_graph or
not item.current_build_set.job_graph.jobs):
error_reason = "did not have any jobs configured"
elif not succeeded:
error_reason = "failed tests"
else:
error_reason = "failed to merge"
log.info("Reported change %s did not merge because it %s, "
"status: all-succeeded: %s, merged: %s",
item.change, error_reason, succeeded, merged)
if not succeeded:
change_queue.decreaseWindowSize()
log.debug("%s window size decreased to %s",
change_queue, change_queue.window)
raise exceptions.MergeFailure(
"Change %s failed to merge" % item.change)
else:
self.reportNormalBuildsetEnd(item.current_build_set,
action, final=True)
log.info("Reported change %s status: all-succeeded: %s, "
"merged: %s", item.change, succeeded, merged)
change_queue.increaseWindowSize()
log.debug("%s window size increased to %s",
change_queue, change_queue.window)
zuul_driver = self.sched.connections.drivers['zuul']
tenant = self.pipeline.tenant
zuul_driver.onChangeMerged(tenant, item.change, source)
elif action:
self.reportNormalBuildsetEnd(item.current_build_set,
action, final=True)
def _reportItem(self, item, phase1, phase2):
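        """Send the reports for an item.

        Returns a tuple (action, success) where 'action' is the
        reporter action that was selected (e.g. 'success', 'failure',
        'no-jobs') and 'success' is True if reporting returned no
        error.
        """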
log = get_annotated_logger(self.log, item.event)
log.debug("Reporting phase1: %s phase2: %s change: %s",
phase1, phase2, item.change)
        ret = True  # Truthy means an error, as returned by sendReport
# In the case of failure, we may not have completed an initial
# merge which would get the layout for this item, so in order
# to determine whether this item's project is in this
# pipeline, use the dynamic layout if available, otherwise,
# fall back to the current static layout as a best
# approximation. However, if we ran jobs, then we obviously
# were in the pipeline config.
project_in_pipeline = bool(item.getJobs())
if not project_in_pipeline:
layout = None
if item.layout_uuid:
layout = self.getLayout(item)
if not layout:
layout = self.pipeline.tenant.layout
try:
project_in_pipeline = bool(
layout.getProjectPipelineConfig(item))
except Exception:
log.exception("Invalid config for change %s", item.change)
if not project_in_pipeline:
log.debug("Project %s not in pipeline %s for change %s",
item.change.project, self.pipeline, item.change)
action = 'no-jobs'
actions = self.pipeline.no_jobs_actions
item.setReportedResult('NO_JOBS')
elif item.getConfigErrors():
log.debug("Invalid config for change %s", item.change)
action = 'config-error'
actions = self.pipeline.config_error_actions
item.setReportedResult('CONFIG_ERROR')
elif item.didMergerFail():
log.debug("Merge conflict")
action = 'merge-conflict'
actions = self.pipeline.merge_conflict_actions
item.setReportedResult('MERGE_CONFLICT')
elif item.wasDequeuedNeedingChange():
log.debug("Dequeued needing change")
action = 'failure'
actions = self.pipeline.failure_actions
item.setReportedResult('FAILURE')
elif item.wasDequeuedMissingRequirements():
log.debug("Dequeued missing merge requirements")
action = 'failure'
actions = self.pipeline.failure_actions
item.setReportedResult('FAILURE')
elif not item.getJobs():
# We don't send empty reports with +1
log.debug("No jobs for change %s", item.change)
action = 'no-jobs'
actions = self.pipeline.no_jobs_actions
item.setReportedResult('NO_JOBS')
elif item.cannotMergeBundle():
log.debug("Bundle can not be merged")
action = 'failure'
actions = self.pipeline.failure_actions
item.setReportedResult("FAILURE")
elif item.isBundleFailing():
log.debug("Bundle is failing")
action = 'failure'
actions = self.pipeline.failure_actions
item.setReportedResult("FAILURE")
if not item.didAllJobsSucceed():
with self.pipeline.state.activeContext(self.current_context):
self.pipeline.state.consecutive_failures += 1
elif item.didAllJobsSucceed() and not item.isBundleFailing():
log.debug("success %s", self.pipeline.success_actions)
action = 'success'
actions = self.pipeline.success_actions
item.setReportedResult('SUCCESS')
with self.pipeline.state.activeContext(self.current_context):
self.pipeline.state.consecutive_failures = 0
else:
action = 'failure'
actions = self.pipeline.failure_actions
item.setReportedResult('FAILURE')
with self.pipeline.state.activeContext(self.current_context):
self.pipeline.state.consecutive_failures += 1
if project_in_pipeline and self.pipeline.state.disabled:
actions = self.pipeline.disabled_actions
        # Check whether to disable only after the reporters have been
        # chosen above, so that the failure which trips disable_at is
        # still reported normally and the disabled reporters are used
        # only /after/ it.
if (self.pipeline.disable_at and not self.pipeline.state.disabled and
self.pipeline.state.consecutive_failures
>= self.pipeline.disable_at):
self.pipeline.state.updateAttributes(
self.current_context, disabled=True)
if actions:
log.info("Reporting item %s, actions: %s", item, actions)
ret = self.sendReport(actions, item, phase1, phase2)
if ret:
log.error("Reporting item %s received: %s", item, ret)
return action, (not ret)
def reportStats(self, item, added=False):
if not self.sched.statsd:
return
try:
            # Update the gauge on enqueue and dequeue, but timers only
            # when dequeuing.
if item.dequeue_time:
dt = (item.dequeue_time - item.enqueue_time) * 1000
else:
dt = None
items = len(self.pipeline.getAllItems())
tenant = self.pipeline.tenant
basekey = 'zuul.tenant.%s' % tenant.name
key = '%s.pipeline.%s' % (basekey, self.pipeline.name)
# stats.timers.zuul.tenant.<tenant>.pipeline.<pipeline>.resident_time
# stats_counts.zuul.tenant.<tenant>.pipeline.<pipeline>.total_changes
# stats.gauges.zuul.tenant.<tenant>.pipeline.<pipeline>.current_changes
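            # For example, with a hypothetical tenant 'example' and
            # pipeline 'check', the keys below expand to:
            #   zuul.tenant.example.pipeline.check.current_changes
            #   zuul.tenant.example.pipeline.check.resident_time
            #   zuul.tenant.example.pipeline.check.total_changes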
self.sched.statsd.gauge(key + '.current_changes', items)
if dt:
self.sched.statsd.timing(key + '.resident_time', dt)
self.sched.statsd.incr(key + '.total_changes')
if hasattr(item.change, 'branch'):
hostname = (item.change.project.canonical_hostname.
replace('.', '_'))
projectname = (item.change.project.name.
replace('.', '_').replace('/', '.'))
branchname = item.change.branch.replace('.', '_').replace(
'/', '.')
# stats.timers.zuul.tenant.<tenant>.pipeline.<pipeline>.
# project.<host>.<project>.<branch>.resident_time
# stats_counts.zuul.tenant.<tenant>.pipeline.<pipeline>.
# project.<host>.<project>.<branch>.total_changes
key += '.project.%s.%s.%s' % (hostname, projectname,
branchname)
if dt:
self.sched.statsd.timing(key + '.resident_time', dt)
self.sched.statsd.incr(key + '.total_changes')
if added and hasattr(item.event, 'arrived_at_scheduler_timestamp'):
now = time.time()
arrived = item.event.arrived_at_scheduler_timestamp
processing = (now - arrived) * 1000
elapsed = (now - item.event.timestamp) * 1000
self.sched.statsd.timing(
basekey + '.event_enqueue_processing_time',
processing)
self.sched.statsd.timing(
basekey + '.event_enqueue_time', elapsed)
self.reportPipelineTiming('event_enqueue_time',
item.event.timestamp)
except Exception:
self.log.exception("Exception reporting pipeline stats")
def reportPipelineTiming(self, key, start, end=None, elapsed=False):
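        """Emit a pipeline timing stat.

        By default 'start' and 'end' are timestamps, 'end' defaults to
        now, and the reported value is (end - start) in milliseconds;
        with elapsed=True, 'start' is taken to be an already-computed
        duration and is reported as-is.
        """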
if not self.sched.statsd:
return
if not start:
return
if end is None:
end = time.time()
pipeline = self.pipeline
tenant = pipeline.tenant
stats_key = f'zuul.tenant.{tenant.name}.pipeline.{pipeline.name}'
if elapsed:
dt = start
else:
dt = (end - start) * 1000
self.sched.statsd.timing(f'{stats_key}.{key}', dt)
|