author    Max Hirschhorn <max.hirschhorn@mongodb.com>    2017-07-17 11:09:34 -0400
committer Max Hirschhorn <max.hirschhorn@mongodb.com>    2017-07-17 11:09:34 -0400
commit    58a3909a3f678dec7bd94bfb38f96756c970113e (patch)
tree      96675ca63ab47a93aca816909805264e6667ea7b /buildscripts/update_test_lifecycle.py
parent    27cf9fd7b31f043af913da135385367126f5691b (diff)
SERVER-29642 SERVER-29643 Add Python tests for test lifecycle scripts.
For test_failures.py:

* Replaces HistoryReport with a TestHistory class that has get_history_by_revision() and
  get_history_by_date() methods. They both return a list of ReportEntry tuples that can be
  used to construct a Report instance.
* Adds Python unit test cases for the Report and ReportEntry classes.
* Creates a Wildcard class as a separate concept from the Missing class.
* Enables --sinceDate and --untilDate with a warning that the script may not return a
  complete result set.
* Adds support for running the script with Python 3.

For update_test_lifecycle.py:

* Introduces a Config namedtuple to represent the test lifecycle model.
* Adds Python unit test cases for the update_tags() function.
* Takes advantage of the partial grouping so that computing summaries for the
  (test, task, variant), (test, task), and (test,) combinations does not require
  re-processing the entire result set.
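A minimal sketch of the new test_failures.py flow described above, using only names that appear in this commit; the test/task/revision values are placeholders, and the keyword names of get_history_by_date() are assumed rather than confirmed by the diff:

from buildscripts import test_failures as tf

# Fetch raw history by revision range; the method returns a list of
# ReportEntry tuples.
test_history = tf.TestHistory(project="mongodb-mongo-master",
                              tests=["jstests/core/example.js"],  # placeholder test
                              tasks=["jsCore"],                   # placeholder task
                              variants=[],
                              distros=[])
history_data = test_history.get_history_by_revision(start_revision="<start-hash>",
                                                    end_revision="<end-hash>")

# get_history_by_date() works the same way (keyword names assumed here):
# history_data = test_history.get_history_by_date(start_date="2017-07-01",
#                                                 end_date="2017-07-17")

# The ReportEntry tuples can then be used to construct a Report instance.
report = tf.Report(history_data)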
Diffstat (limited to 'buildscripts/update_test_lifecycle.py')
-rwxr-xr-x  buildscripts/update_test_lifecycle.py  525
1 file changed, 348 insertions(+), 177 deletions(-)
diff --git a/buildscripts/update_test_lifecycle.py b/buildscripts/update_test_lifecycle.py
index 4b4325b255a..9699a5418d8 100755
--- a/buildscripts/update_test_lifecycle.py
+++ b/buildscripts/update_test_lifecycle.py
@@ -4,30 +4,73 @@
Update etc/test_lifecycle.yml to tag unreliable tests based on historic failure rates.
"""
+
+from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
-import copy
+import datetime
import optparse
-import os
+import os.path
import subprocess
import sys
+import textwrap
+import warnings
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
from buildscripts import resmokelib
-from buildscripts.ciconfig import evergreen
-from buildscripts.ciconfig import tags
from buildscripts import test_failures as tf
+from buildscripts.ciconfig import evergreen as ci_evergreen
+from buildscripts.ciconfig import tags as ci_tags
+
+
+if sys.version_info[0] == 2:
+ _NUMBER_TYPES = (int, long, float)
+else:
+ _NUMBER_TYPES = (int, float)
+
+
+Rates = collections.namedtuple("Rates", ["acceptable", "unacceptable"])
+
+
+Config = collections.namedtuple("Config", [
+ "test_fail_rates",
+ "task_fail_rates",
+ "variant_fail_rates",
+ "distro_fail_rates",
+ "reliable_min_runs",
+ "reliable_time_period",
+ "unreliable_min_runs",
+ "unreliable_time_period",
+])
+
+
+DEFAULT_CONFIG = Config(
+ test_fail_rates=Rates(acceptable=0.1, unacceptable=0.3),
+ task_fail_rates=Rates(acceptable=0.1, unacceptable=0.3),
+ variant_fail_rates=Rates(acceptable=0.2, unacceptable=0.4),
+ distro_fail_rates=Rates(acceptable=0.2, unacceptable=0.4),
+ reliable_min_runs=5,
+ reliable_time_period=datetime.timedelta(weeks=1),
+ unreliable_min_runs=20,
+ unreliable_time_period=datetime.timedelta(weeks=4))
+
+
+DEFAULT_PROJECT = "mongodb-mongo-master"
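As a hedged illustration of the Rates/Config model introduced above: because Config is a namedtuple, a variant of DEFAULT_CONFIG can be derived with the standard _replace() method. The values below are illustrative, not recommendations.

import datetime

# Tighten the per-test thresholds and lengthen the unreliable window, keeping
# every other field of DEFAULT_CONFIG unchanged.
stricter_config = DEFAULT_CONFIG._replace(
    test_fail_rates=Rates(acceptable=0.05, unacceptable=0.2),
    unreliable_time_period=datetime.timedelta(weeks=8))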
def write_yaml_file(yaml_file, lifecycle):
"""Writes the lifecycle object to yaml_file."""
- comment = ("This file was generated by {} and shouldn't be edited by hand. It was"
- " generated against commit {} with the following invocation: {}.").format(
- sys.argv[0], callo(["git", "rev-parse", "HEAD"]), " ".join(sys.argv))
+
+ comment = (
+ "This file was generated by {} and shouldn't be edited by hand. It was generated against"
+ " commit {} with the following invocation: {}."
+ ).format(sys.argv[0], callo(["git", "rev-parse", "HEAD"]).rstrip(), " ".join(sys.argv))
+
lifecycle.write_file(yaml_file, comment)
@@ -116,7 +159,7 @@ def unreliable_test(test_fr, unacceptable_fr, test_runs, min_run):
A test should be added to the set of tests believed not to run reliably when it has at
least min_run executions and a failure rate of at least unacceptable_fr.
"""
- return test_runs >= min_run and test_fr > unacceptable_fr
+ return test_runs >= min_run and test_fr >= unacceptable_fr
def reliable_test(test_fr, acceptable_fr, test_runs, min_run):
@@ -125,7 +168,7 @@ def reliable_test(test_fr, acceptable_fr, test_runs, min_run):
A test should then be removed from the set of tests believed not to run reliably when it
has fewer than min_run executions or a failure rate of no more than acceptable_fr.
"""
- return test_runs < min_run or test_fr < acceptable_fr
+ return test_runs < min_run or test_fr <= acceptable_fr
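Both hunks above make the comparisons inclusive. A quick sketch of the boundary behavior under the new predicates, using the signatures shown in the hunk headers:

# A failure rate of exactly unacceptable_fr now marks a test as unreliable
# (previously it had to be strictly greater):
assert unreliable_test(test_fr=0.3, unacceptable_fr=0.3, test_runs=20, min_run=20)

# Symmetrically, a failure rate of exactly acceptable_fr now counts as reliable:
assert reliable_test(test_fr=0.1, acceptable_fr=0.1, test_runs=20, min_run=20)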
def check_fail_rates(fr_name, acceptable_fr, unacceptable_fr):
@@ -141,16 +184,21 @@ def check_days(name, days):
raise ValueError("'{}' days must be greater than 0.".format(name))
-def unreliable_tag(test, task, variant, distro):
+def unreliable_tag(task, variant, distro):
"""Returns the unreliable tag."""
- if distro and variant and task and test:
- return "unreliable|{}|{}|{}".format(task, variant, distro)
- elif variant and task and test:
- return "unreliable|{}|{}".format(task, variant)
- elif task and test:
- return "unreliable|{}".format(task)
- elif test:
- return "unreliable"
+
+ for (component_name, component_value) in (("task", task),
+ ("variant", variant),
+ ("distro", distro)):
+ if isinstance(component_value, (tf.Wildcard, tf.Missing)):
+ if component_name == "task":
+ return "unreliable"
+ elif component_name == "variant":
+ return "unreliable|{}".format(task)
+ elif component_name == "distro":
+ return "unreliable|{}|{}".format(task, variant)
+
+ return "unreliable|{}|{}|{}".format(task, variant, distro)
def update_lifecycle(lifecycle, report, method_test, add_tags, fail_rate, min_run):
@@ -163,8 +211,7 @@ def update_lifecycle(lifecycle, report, method_test, add_tags, fail_rate, min_ru
fail_rate,
summary.num_pass + summary.num_fail,
min_run):
- update_tag = unreliable_tag(
- summary.test, summary.task, summary.variant, summary.distro)
+ update_tag = unreliable_tag(summary.task, summary.variant, summary.distro)
if add_tags:
lifecycle.add_tag("js_test", summary.test, update_tag)
else:
@@ -175,101 +222,266 @@ def compare_tags(tag_a, tag_b):
return cmp(tag_a.split("|"), tag_b.split("|"))
+def validate_config(config):
+ """
+ Raises a TypeError or ValueError exception if 'config' isn't a valid model.
+ """
+
+ for (name, fail_rates) in (("test", config.test_fail_rates),
+ ("task", config.task_fail_rates),
+ ("variant", config.variant_fail_rates),
+ ("distro", config.distro_fail_rates)):
+ if not isinstance(fail_rates.acceptable, _NUMBER_TYPES):
+ raise TypeError("The acceptable {} failure rate must be a number, but got {}".format(
+ name, fail_rates.acceptable))
+ elif fail_rates.acceptable < 0 or fail_rates.acceptable > 1:
+ raise ValueError(("The acceptable {} failure rate must be between 0 and 1 (inclusive),"
+ " but got {}").format(name, fail_rates.acceptable))
+ elif not isinstance(fail_rates.unacceptable, _NUMBER_TYPES):
+ raise TypeError("The unacceptable {} failure rate must be a number, but got {}".format(
+ name, fail_rates.unacceptable))
+ elif fail_rates.unacceptable < 0 or fail_rates.unacceptable > 1:
+ raise ValueError(("The unacceptable {} failure rate must be between 0 and 1"
+ " (inclusive), but got {}").format(name, fail_rates.unacceptable))
+ elif fail_rates.acceptable > fail_rates.unacceptable:
+ raise ValueError(
+                ("The acceptable {0} failure rate ({1}) must be no larger than the unacceptable {0}"
+ " failure rate ({2})").format(
+ name, fail_rates.acceptable, fail_rates.unacceptable))
+
+ for (name, min_runs) in (("reliable", config.reliable_min_runs),
+ ("unreliable", config.unreliable_min_runs)):
+ if not isinstance(min_runs, _NUMBER_TYPES):
+ raise TypeError(("The minimum number of runs for considering a test {} must be a"
+ " number, but got {}").format(name, min_runs))
+ elif min_runs <= 0:
+ raise ValueError(("The minimum number of runs for considering a test {} must be a"
+ " positive integer, but got {}").format(name, min_runs))
+ elif isinstance(min_runs, float) and not min_runs.is_integer():
+ raise ValueError(("The minimum number of runs for considering a test {} must be an"
+ " integer, but got {}").format(name, min_runs))
+
+ for (name, time_period) in (("reliable", config.reliable_time_period),
+ ("unreliable", config.unreliable_time_period)):
+ if not isinstance(time_period, datetime.timedelta):
+ raise TypeError(
+ "The {} time period must be a datetime.timedelta instance, but got {}".format(
+ name, time_period))
+ elif time_period.days <= 0:
+ raise ValueError(
+ "The {} time period must be a positive number of days, but got {}".format(
+ name, time_period))
+ elif time_period - datetime.timedelta(days=time_period.days) > datetime.timedelta():
+ raise ValueError(
+ "The {} time period must be an integral number of days, but got {}".format(
+ name, time_period))
+
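A hedged example of the validation above in action, assuming Config, Rates, DEFAULT_CONFIG, and validate_config are importable from this module:

import datetime

# An acceptable rate larger than the unacceptable rate violates the model.
bad_rates = DEFAULT_CONFIG._replace(test_fail_rates=Rates(acceptable=0.5, unacceptable=0.3))
try:
    validate_config(bad_rates)
except ValueError as err:
    print(err)  # acceptable test failure rate (0.5) must be no larger than ...

# A fractional number of days is rejected as well.
bad_period = DEFAULT_CONFIG._replace(reliable_time_period=datetime.timedelta(days=1.5))
try:
    validate_config(bad_period)
except ValueError as err:
    print(err)  # reliable time period must be an integral number of days ...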
+
+def update_tags(lifecycle, config, report):
+ """
+ Updates the tags in 'lifecycle' based on the historical test failures mentioned in 'report'
+ according to the model described by 'config'.
+ """
+
+ # We initialize 'grouped_entries' to make PyLint not complain about 'grouped_entries' being used
+ # before assignment.
+ grouped_entries = None
+ for (i, (components, rates)) in enumerate(
+ ((tf.Report.TEST_TASK_VARIANT_DISTRO, config.distro_fail_rates),
+ (tf.Report.TEST_TASK_VARIANT, config.variant_fail_rates),
+ (tf.Report.TEST_TASK, config.task_fail_rates),
+ (tf.Report.TEST, config.test_fail_rates))):
+ if i > 0:
+ report = tf.Report(grouped_entries)
+
+        # We reassign the value of 'grouped_entries' on each iteration because entries grouped by
+        # (test, task, variant, distro) preserve enough information to be regrouped on any coarser
+        # subset of those components.
+ grouped_entries = report.summarize_by(components, time_period=tf.Report.DAILY)
+
+ # Filter out any test executions from prior to 'config.unreliable_time_period'.
+ unreliable_start_date = (report.end_date - config.unreliable_time_period
+ + datetime.timedelta(days=1))
+ unreliable_report = tf.Report(entry for entry in grouped_entries
+ if entry.start_date >= unreliable_start_date)
+ update_lifecycle(lifecycle,
+ unreliable_report.summarize_by(components),
+ unreliable_test,
+ True,
+ rates.unacceptable,
+ config.unreliable_min_runs)
+
+ # Filter out any test executions from prior to 'config.reliable_time_period'.
+ reliable_start_date = (report.end_date - config.reliable_time_period
+ + datetime.timedelta(days=1))
+ reliable_report = tf.Report(entry for entry in grouped_entries
+ if entry.start_date >= reliable_start_date)
+ update_lifecycle(lifecycle,
+ reliable_report.summarize_by(components),
+ reliable_test,
+ False,
+ rates.acceptable,
+ config.reliable_min_runs)
+
+
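The loop above relies on the partial-grouping property called out in the commit message: entries summarized at the (test, task, variant, distro) level retain enough information to be regrouped on any coarser key, so only the first pass reads the full result set. A rough sketch of that idea, using the Report API as it appears in this diff ('history_data' is as fetched in main() below):

report = tf.Report(history_data)  # full result set, processed once

# First pass: group at the finest granularity, one entry per day.
entries = report.summarize_by(tf.Report.TEST_TASK_VARIANT_DISTRO,
                              time_period=tf.Report.DAILY)

# Each coarser grouping is then computed from the previous pass's output
# instead of from the raw entries again.
for components in (tf.Report.TEST_TASK_VARIANT, tf.Report.TEST_TASK, tf.Report.TEST):
    entries = tf.Report(entries).summarize_by(components, time_period=tf.Report.DAILY)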
def main():
+ """
+ Utility for updating a resmoke.py tag file based on computing test failure rates from the
+ Evergreen API.
+ """
- required_options = ["project",
- "reliable_test_min_run",
- "unreliable_test_min_run",
- "test_fail_rates",
- ]
- parser = optparse.OptionParser(description=__doc__,
- usage="Usage: %prog [options] test1 test2 ...")
- parser.add_option("--project", dest="project",
- default=None,
- help="Evergreen project to analyze [REQUIRED].")
- parser.add_option("--reliableTestMinimumRun", dest="reliable_test_min_run",
- default=None,
- type="int",
- help="Minimum number of tests runs for test to be considered as reliable"
- " [REQUIRED].")
- parser.add_option("--unreliableTestMinimumRun", dest="unreliable_test_min_run",
- default=None,
- type="int",
- help="Minimum number of tests runs for test to be considered as unreliable"
- " [REQUIRED].")
- parser.add_option("--testFailRates", dest="test_fail_rates",
- metavar="ACCEPTABLE-FAILRATE UNACCEPTABLE-FAILRATE",
- default=None,
- type="float",
- nargs=2,
- help="Test fail rates: acceptable fail rate and unacceptable fail rate"
- " Specify floating numbers between 0.0 and 1.0 [REQUIRED].")
- parser.add_option("--taskFailRates", dest="task_fail_rates",
- metavar="ACCEPTABLE-FAILRATE UNACCEPTABLE-FAILRATE",
- default=None,
- type="float",
- nargs=2,
- help="Task fail rates: acceptable fail rate and unacceptable fail rate."
- " Specify floating numbers between 0.0 and 1.0."
- " Uses --test-fail-rates if unspecified.")
- parser.add_option("--variantFailRates", dest="variant_fail_rates",
- metavar="ACCEPTABLE-FAILRATE UNACCEPTABLE-FAILRATE",
- default=None,
- type="float",
- nargs=2,
- help="Variant fail rates: acceptable fail rate and unacceptable fail rate."
- " Specify floating numbers between 0.0 and 1.0."
- " Uses --task-fail-rates if unspecified.")
- parser.add_option("--distroFailRates", dest="distro_fail_rates",
- metavar="ACCEPTABLE-FAILRATE UNACCEPTABLE-FAILRATE",
- default=None,
- type="float",
- nargs=2,
- help="Distro fail rates: acceptable fail rate and unacceptable fail rate."
- " Specify floating numbers between 0.0 and 1.0."
- " Uses --variant-fail-rates if unspecified.")
- parser.add_option("--tasks", dest="tasks",
- default=None,
- help="Names of tasks to analyze for tagging unreliable tests."
- " If specified and no tests are specified, then only tests"
- " associated with the tasks will be analyzed."
- " If unspecified and no tests are specified, the list of tasks will be"
- " the non-excluded list of tasks from the file specified by"
- " '--evergreenYML'.")
- parser.add_option("--variants", dest="variants",
- default="",
- help="Names of variants to analyze for tagging unreliable tests.")
- parser.add_option("--distros", dest="distros",
- default="",
- help="Names of distros to analyze for tagging unreliable tests [UNUSED].")
- parser.add_option("--evergreenYML", dest="evergreen_yml",
- default="etc/evergreen.yml",
- help="Evergreen YML file used to get the list of tasks,"
- " defaults to '%default'.")
- parser.add_option("--lifecycleFile", dest="lifecycle_file",
+ parser = optparse.OptionParser(description=textwrap.dedent(main.__doc__),
+ usage="Usage: %prog [options] [test1 test2 ...]")
+
+ data_options = optparse.OptionGroup(
+ parser,
+ title="Data options",
+ description=("Options used to configure what historical test failure data to retrieve from"
+ " Evergreen."))
+ parser.add_option_group(data_options)
+
+ data_options.add_option(
+ "--project", dest="project",
+ metavar="<project-name>",
+ default=tf.TestHistory.DEFAULT_PROJECT,
+ help="The Evergreen project to analyze. Defaults to '%default'.")
+
+ data_options.add_option(
+ "--tasks", dest="tasks",
+ metavar="<task1,task2,...>",
+ help=("The Evergreen tasks to analyze for tagging unreliable tests. If specified in"
+ " additional to having test positional arguments, then only tests that run under the"
+              " addition to having test positional arguments, then only tests that run under the"
+ " defaults to the non-excluded list of tasks from the specified"
+ " --evergreenProjectConfig file."))
+
+ data_options.add_option(
+ "--variants", dest="variants",
+ metavar="<variant1,variant2,...>",
+ default="",
+ help="The Evergreen build variants to analyze for tagging unreliable tests.")
+
+ data_options.add_option(
+ "--distros", dest="distros",
+ metavar="<distro1,distro2,...>",
+ default="",
+ help="The Evergreen distros to analyze for tagging unreliable tests.")
+
+ data_options.add_option(
+ "--evergreenProjectConfig", dest="evergreen_project_config",
+ metavar="<project-config-file>",
+ default="etc/evergreen.yml",
+ help=("The Evergreen project configuration file used to get the list of tasks if --tasks is"
+ " omitted. Defaults to '%default'."))
+
+ model_options = optparse.OptionGroup(
+ parser,
+ title="Model options",
+ description=("Options used to configure whether (test,), (test, task),"
+ " (test, task, variant), and (test, task, variant, distro) combinations are"
+ " considered unreliable."))
+ parser.add_option_group(model_options)
+
+ model_options.add_option(
+ "--reliableTestMinRuns", type="int", dest="reliable_test_min_runs",
+ metavar="<reliable-min-runs>",
+ default=DEFAULT_CONFIG.reliable_min_runs,
+ help=("The minimum number of test executions required for a test's failure rate to"
+ " determine whether the test is considered reliable. If a test has fewer than"
+              " <reliable-min-runs> executions, then it is considered reliable."))
+
+ model_options.add_option(
+ "--unreliableTestMinRuns", type="int", dest="unreliable_test_min_runs",
+ metavar="<unreliable-min-runs>",
+ default=DEFAULT_CONFIG.unreliable_min_runs,
+ help=("The minimum number of test executions required for a test's failure rate to"
+ " determine whether the test is considered unreliable. If a test has fewer than"
+ " <unreliable-min-runs> executions, then it cannot be considered unreliable."))
+
+ model_options.add_option(
+ "--testFailRates", type="float", nargs=2, dest="test_fail_rates",
+ metavar="<test-acceptable-fail-rate> <test-unacceptable-fail-rate>",
+ default=DEFAULT_CONFIG.test_fail_rates,
+ help=("Controls how readily a test is considered unreliable. Each failure rate must be a"
+ " number between 0 and 1 (inclusive) with"
+ " <test-unacceptable-fail-rate> >= <test-acceptable-fail-rate>. If a test fails no"
+ " more than <test-acceptable-fail-rate> in <reliable-days> time, then it is"
+ " considered reliable. Otherwise, if a test fails at least as much as"
+              " <test-unacceptable-fail-rate> in <unreliable-days> time, then it is considered"
+ " unreliable. Defaults to %default."))
+
+ model_options.add_option(
+ "--taskFailRates", type="float", nargs=2, dest="task_fail_rates",
+ metavar="<task-acceptable-fail-rate> <task-unacceptable-fail-rate>",
+ default=DEFAULT_CONFIG.task_fail_rates,
+ help=("Controls how readily a (test, task) combination is considered unreliable. Each"
+ " failure rate must be a number between 0 and 1 (inclusive) with"
+ " <task-unacceptable-fail-rate> >= <task-acceptable-fail-rate>. If a (test, task)"
+ " combination fails no more than <task-acceptable-fail-rate> in <reliable-days> time,"
+ " then it is considered reliable. Otherwise, if a test fails at least as much as"
+ " <task-unacceptable-fail-rate> in <unreliable-days> time, then it is considered"
+ " unreliable. Defaults to %default."))
+
+ model_options.add_option(
+ "--variantFailRates", type="float", nargs=2, dest="variant_fail_rates",
+ metavar="<variant-acceptable-fail-rate> <variant-unacceptable-fail-rate>",
+ default=DEFAULT_CONFIG.variant_fail_rates,
+ help=("Controls how readily a (test, task, variant) combination is considered unreliable."
+ " Each failure rate must be a number between 0 and 1 (inclusive) with"
+ " <variant-unacceptable-fail-rate> >= <variant-acceptable-fail-rate>. If a"
+ " (test, task, variant) combination fails no more than <variant-acceptable-fail-rate>"
+ " in <reliable-days> time, then it is considered reliable. Otherwise, if a test fails"
+ " at least as much as <variant-unacceptable-fail-rate> in <unreliable-days> time,"
+ " then it is considered unreliable. Defaults to %default."))
+
+ model_options.add_option(
+ "--distroFailRates", type="float", nargs=2, dest="distro_fail_rates",
+ metavar="<distro-acceptable-fail-rate> <distro-unacceptable-fail-rate>",
+ default=DEFAULT_CONFIG.distro_fail_rates,
+ help=("Controls how readily a (test, task, variant, distro) combination is considered"
+ " unreliable. Each failure rate must be a number between 0 and 1 (inclusive) with"
+ " <distro-unacceptable-fail-rate> >= <distro-acceptable-fail-rate>. If a"
+ " (test, task, variant, distro) combination fails no more than"
+ " <distro-acceptable-fail-rate> in <reliable-days> time, then it is considered"
+ " reliable. Otherwise, if a test fails at least as much as"
+ " <distro-unacceptable-fail-rate> in <unreliable-days> time, then it is considered"
+ " unreliable. Defaults to %default."))
+
+ model_options.add_option(
+ "--reliableDays", type="int", dest="reliable_days",
+ metavar="<ndays>",
+ default=DEFAULT_CONFIG.reliable_time_period.days,
+ help=("The time period to analyze when determining if a test has become reliable. Defaults"
+ " to %default day(s)."))
+
+ model_options.add_option(
+ "--unreliableDays", type="int", dest="unreliable_days",
+ metavar="<ndays>",
+ default=DEFAULT_CONFIG.unreliable_time_period.days,
+ help=("The time period to analyze when determining if a test has become unreliable."
+ " Defaults to %default day(s)."))
+
+ parser.add_option("--resmokeTagFile", dest="tag_file",
+ metavar="<tagfile>",
default="etc/test_lifecycle.yml",
- help="Evergreen lifecycle file to update, defaults to '%default'.")
- parser.add_option("--reliableDays", dest="reliable_days",
- default=7,
- type="int",
- help="Number of days to check for reliable tests, defaults to '%default'.")
- parser.add_option("--unreliableDays", dest="unreliable_days",
- default=28,
- type="int",
- help="Number of days to check for unreliable tests, defaults to '%default'.")
- parser.add_option("--batchGroupSize", dest="batch_size",
+ help="The resmoke.py tag file to update. Defaults to '%default'.")
+
+ parser.add_option("--requestBatchSize", type="int", dest="batch_size",
+ metavar="<batch-size>",
default=100,
- type="int",
- help="Size of test batch group, defaults to '%default'.")
+ help=("The maximum number of tests to query the Evergreen API for in a single"
+ " request. A higher value for this option will reduce the number of"
+ " roundtrips between this client and Evergreen. Defaults to %default."))
(options, tests) = parser.parse_args()
- for option in required_options:
- if not getattr(options, option):
- parser.print_help()
- parser.error("Missing required option")
+ if options.distros:
+ warnings.warn(
+ ("Until https://jira.mongodb.org/browse/EVG-1665 is implemented, distro information"
+ " isn't returned by the Evergreen API. This option will therefore be ignored."),
+ RuntimeWarning)
- evg_conf = evergreen.EvergreenProjectConfig(options.evergreen_yml)
+ evg_conf = ci_evergreen.EvergreenProjectConfig(options.evergreen_project_config)
use_test_tasks_membership = False
tasks = options.tasks.split(",") if options.tasks else []
@@ -282,25 +494,18 @@ def main():
distros = options.distros.split(",") if options.distros else []
- check_fail_rates("Test", options.test_fail_rates[0], options.test_fail_rates[1])
- # The less specific failures rates are optional and default to a lower level value.
- if not options.task_fail_rates:
- options.task_fail_rates = options.test_fail_rates
- else:
- check_fail_rates("Task", options.task_fail_rates[0], options.task_fail_rates[1])
- if not options.variant_fail_rates:
- options.variant_fail_rates = options.task_fail_rates
- else:
- check_fail_rates("Variant", options.variant_fail_rates[0], options.variant_fail_rates[1])
- if not options.distro_fail_rates:
- options.distro_fail_rates = options.variant_fail_rates
- else:
- check_fail_rates("Distro", options.distro_fail_rates[0], options.distro_fail_rates[1])
-
- check_days("Reliable days", options.reliable_days)
- check_days("Unreliable days", options.unreliable_days)
-
- lifecycle = tags.TagsConfig(options.lifecycle_file, cmp_func=compare_tags)
+ config = Config(
+ test_fail_rates=Rates(*options.test_fail_rates),
+ task_fail_rates=Rates(*options.task_fail_rates),
+ variant_fail_rates=Rates(*options.variant_fail_rates),
+ distro_fail_rates=Rates(*options.distro_fail_rates),
+ reliable_min_runs=options.reliable_test_min_runs,
+ reliable_time_period=datetime.timedelta(days=options.reliable_days),
+ unreliable_min_runs=options.unreliable_test_min_runs,
+ unreliable_time_period=datetime.timedelta(days=options.unreliable_days))
+ validate_config(config)
+
+ lifecycle = ci_tags.TagsConfig.from_file(options.tag_file, cmp_func=compare_tags)
test_tasks_membership = get_test_tasks_membership(evg_conf)
# If no tests are specified then the list of tests is generated from the list of tasks.
@@ -325,58 +530,24 @@ def main():
if not tasks:
print("Warning - No tasks found for tests {}, skipping this group.".format(tests))
continue
- report = tf.HistoryReport(period_type="revision",
- start=commit_prior,
- end=commit_last,
- group_period=options.reliable_days,
- project=options.project,
- tests=tests,
- tasks=tasks,
- variants=variants,
- distros=distros)
- view_report = report.generate_report()
-
- # We build up report_combo to check for more specific test failures rates.
- report_combo = []
- # TODO EVG-1665: Uncomment this line once this has been supported.
- # for combo in ["test", "task", "variant", "distro"]:
- for combo in ["test", "task", "variant"]:
- report_combo.append(combo)
- if combo == "distro":
- acceptable_fail_rate = options.distro_fail_rates[0]
- unacceptable_fail_rate = options.distro_fail_rates[1]
- elif combo == "variant":
- acceptable_fail_rate = options.variant_fail_rates[0]
- unacceptable_fail_rate = options.variant_fail_rates[1]
- elif combo == "task":
- acceptable_fail_rate = options.task_fail_rates[0]
- unacceptable_fail_rate = options.task_fail_rates[1]
- else:
- acceptable_fail_rate = options.test_fail_rates[0]
- unacceptable_fail_rate = options.test_fail_rates[1]
-
- # Unreliable tests are analyzed from the entire period.
- update_lifecycle(lifecycle,
- view_report.view_summary(group_on=report_combo),
- unreliable_test,
- True,
- unacceptable_fail_rate,
- options.unreliable_test_min_run)
-
- # Reliable tests are analyzed from the last period, i.e., last 14 days.
- (reliable_start_date, reliable_end_date) = view_report.last_period()
- update_lifecycle(lifecycle,
- view_report.view_summary(group_on=report_combo,
- start_date=reliable_start_date,
- end_date=reliable_end_date),
- reliable_test,
- False,
- acceptable_fail_rate,
- options.reliable_test_min_run)
-
- # Update the lifecycle_file only if there have been changes.
+
+ test_history = tf.TestHistory(project=options.project,
+ tests=tests,
+ tasks=tasks,
+ variants=variants,
+ distros=distros)
+
+ history_data = test_history.get_history_by_revision(start_revision=commit_prior,
+ end_revision=commit_last)
+
+ report = tf.Report(history_data)
+ update_tags(lifecycle, config, report)
+
+    # We write the 'lifecycle' tag configuration to the 'options.tag_file' file only if there
+ # have been changes to the tags. In particular, we avoid modifying the file when only the header
+ # comment for the YAML file would change.
if lifecycle.is_modified():
- write_yaml_file(options.lifecycle_file, lifecycle)
+ write_yaml_file(options.tag_file, lifecycle)
if __name__ == "__main__":
main()
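Putting the pieces together, a hedged sketch of the end-to-end flow main() now performs, using only calls visible in this diff; 'tests', 'tasks', 'variants', 'distros', 'commit_prior', and 'commit_last' stand for the values main() computes, and the file path mirrors the --resmokeTagFile default:

lifecycle = ci_tags.TagsConfig.from_file("etc/test_lifecycle.yml", cmp_func=compare_tags)

test_history = tf.TestHistory(project=DEFAULT_PROJECT, tests=tests, tasks=tasks,
                              variants=variants, distros=distros)
history_data = test_history.get_history_by_revision(start_revision=commit_prior,
                                                    end_revision=commit_last)

update_tags(lifecycle, DEFAULT_CONFIG, tf.Report(history_data))

# Rewrite the tag file only when the tags actually changed.
if lifecycle.is_modified():
    write_yaml_file("etc/test_lifecycle.yml", lifecycle)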