diff options
Diffstat (limited to 'test/lib/ansible_test/_internal')
94 files changed, 3597 insertions, 900 deletions
diff --git a/test/lib/ansible_test/_internal/__init__.py b/test/lib/ansible_test/_internal/__init__.py index e604a2b35a..18e776ad74 100644 --- a/test/lib/ansible_test/_internal/__init__.py +++ b/test/lib/ansible_test/_internal/__init__.py @@ -3,6 +3,7 @@ from __future__ import annotations import os import sys +import typing as t # This import should occur as early as possible. # It must occur before subprocess has been imported anywhere in the current process. @@ -10,10 +11,14 @@ from .init import ( CURRENT_RLIMIT_NOFILE, ) +from .constants import ( + STATUS_HOST_CONNECTION_ERROR, +) + from .util import ( ApplicationError, + HostConnectionError, display, - MAXFD, ) from .delegation import ( @@ -57,16 +62,18 @@ def main(): display.truncate = config.truncate display.redact = config.redact display.color = config.color - display.info_stderr = config.info_stderr + display.fd = sys.stderr if config.display_stderr else sys.stdout configure_timeout(config) display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2) - display.info('MAXFD: %d' % MAXFD, verbosity=2) delegate_args = None target_names = None try: + if config.check_layout: + data_context().check_layout() + args.func(config) except PrimeContainers: pass @@ -78,20 +85,23 @@ def main(): delegate_args = (ex.host_state, ex.exclude, ex.require) if delegate_args: - # noinspection PyTypeChecker delegate(config, *delegate_args) if target_names: for target_name in target_names: - print(target_name) # info goes to stderr, this should be on stdout + print(target_name) # display goes to stderr, this should be on stdout display.review_warnings() config.success = True + except HostConnectionError as ex: + display.fatal(str(ex)) + ex.run_callback() + sys.exit(STATUS_HOST_CONNECTION_ERROR) except ApplicationWarning as ex: display.warning(u'%s' % ex) sys.exit(0) except ApplicationError as ex: - display.error(u'%s' % ex) + display.fatal(u'%s' % ex) sys.exit(1) except KeyboardInterrupt: sys.exit(2) diff --git 
a/test/lib/ansible_test/_internal/ansible_util.py b/test/lib/ansible_test/_internal/ansible_util.py index 5c689bed48..0f25040385 100644 --- a/test/lib/ansible_test/_internal/ansible_util.py +++ b/test/lib/ansible_test/_internal/ansible_util.py @@ -22,11 +22,11 @@ from .util import ( ANSIBLE_SOURCE_ROOT, ANSIBLE_TEST_TOOLS_ROOT, get_ansible_version, + raw_command, ) from .util_common import ( create_temp_dir, - run_command, ResultType, intercept_python, get_injector_path, @@ -51,6 +51,10 @@ from .host_configs import ( PythonConfig, ) +from .thread import ( + mutex, +) + def parse_inventory(args, inventory_path): # type: (EnvironmentConfig, str) -> t.Dict[str, t.Any] """Return a dict parsed from the given inventory file.""" @@ -193,13 +197,14 @@ def configure_plugin_paths(args): # type: (CommonConfig) -> t.Dict[str, str] return env +@mutex def get_ansible_python_path(args): # type: (CommonConfig) -> str """ Return a directory usable for PYTHONPATH, containing only the ansible package. If a temporary directory is required, it will be cached for the lifetime of the process and cleaned up at exit. 
""" try: - return get_ansible_python_path.python_path + return get_ansible_python_path.python_path # type: ignore[attr-defined] except AttributeError: pass @@ -217,7 +222,7 @@ def get_ansible_python_path(args): # type: (CommonConfig) -> str if not args.explain: generate_egg_info(python_path) - get_ansible_python_path.python_path = python_path + get_ansible_python_path.python_path = python_path # type: ignore[attr-defined] return python_path @@ -259,12 +264,12 @@ class CollectionDetailError(ApplicationError): self.reason = reason -def get_collection_detail(args, python): # type: (EnvironmentConfig, PythonConfig) -> CollectionDetail +def get_collection_detail(python): # type: (PythonConfig) -> CollectionDetail """Return collection detail.""" collection = data_context().content.collection directory = os.path.join(collection.root, collection.directory) - stdout = run_command(args, [python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'collection_detail.py'), directory], capture=True, always=True)[0] + stdout = raw_command([python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'collection_detail.py'), directory], capture=True)[0] result = json.loads(stdout) error = result.get('error') @@ -283,15 +288,15 @@ def run_playbook( args, # type: EnvironmentConfig inventory_path, # type: str playbook, # type: str - run_playbook_vars=None, # type: t.Optional[t.Dict[str, t.Any]] - capture=False, # type: bool + capture, # type: bool + variables=None, # type: t.Optional[t.Dict[str, t.Any]] ): # type: (...) 
-> None """Run the specified playbook using the given inventory file and playbook variables.""" playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook) cmd = ['ansible-playbook', '-i', inventory_path, playbook_path] - if run_playbook_vars: - cmd.extend(['-e', json.dumps(run_playbook_vars)]) + if variables: + cmd.extend(['-e', json.dumps(variables)]) if args.verbosity: cmd.append('-%s' % ('v' * args.verbosity)) diff --git a/test/lib/ansible_test/_internal/become.py b/test/lib/ansible_test/_internal/become.py index dc0a208a62..5a5506a14e 100644 --- a/test/lib/ansible_test/_internal/become.py +++ b/test/lib/ansible_test/_internal/become.py @@ -5,9 +5,18 @@ import abc import shlex import typing as t +from .util import ( + get_subclasses, +) + class Become(metaclass=abc.ABCMeta): """Base class for become implementations.""" + @classmethod + def name(cls): + """The name of this plugin.""" + return cls.__name__.lower() + @property @abc.abstractmethod def method(self): # type: () -> str @@ -18,6 +27,38 @@ class Become(metaclass=abc.ABCMeta): """Return the given command, if any, with privilege escalation.""" +class Doas(Become): + """Become using 'doas'.""" + @property + def method(self): # type: () -> str + """The name of the Ansible become plugin that is equivalent to this.""" + raise NotImplementedError('Ansible has no built-in doas become plugin.') + + def prepare_command(self, command): # type: (t.List[str]) -> t.List[str] + """Return the given command, if any, with privilege escalation.""" + become = ['doas', '-n'] + + if command: + become.extend(['sh', '-c', ' '.join(shlex.quote(c) for c in command)]) + else: + become.extend(['-s']) + + return become + + +class DoasSudo(Doas): + """Become using 'doas' in ansible-test and then after bootstrapping use 'sudo' for other ansible commands.""" + @classmethod + def name(cls): + """The name of this plugin.""" + return 'doas_sudo' + + @property + def method(self): # type: () -> str + """The name of the 
Ansible become plugin that is equivalent to this.""" + return 'sudo' + + class Su(Become): """Become using 'su'.""" @property @@ -35,6 +76,19 @@ class Su(Become): return become +class SuSudo(Su): + """Become using 'su' in ansible-test and then after bootstrapping use 'sudo' for other ansible commands.""" + @classmethod + def name(cls): + """The name of this plugin.""" + return 'su_sudo' + + @property + def method(self): # type: () -> str + """The name of the Ansible become plugin that is equivalent to this.""" + return 'sudo' + + class Sudo(Become): """Become using 'sudo'.""" @property @@ -50,3 +104,6 @@ class Sudo(Become): become.extend(['sh', '-c', ' '.join(shlex.quote(c) for c in command)]) return become + + +SUPPORTED_BECOME_METHODS = {cls.name(): cls for cls in get_subclasses(Become)} diff --git a/test/lib/ansible_test/_internal/bootstrap.py b/test/lib/ansible_test/_internal/bootstrap.py index 9eb26de7d2..326973978a 100644 --- a/test/lib/ansible_test/_internal/bootstrap.py +++ b/test/lib/ansible_test/_internal/bootstrap.py @@ -35,8 +35,8 @@ class Bootstrap: """The bootstrap type to pass to the bootstrapping script.""" return self.__class__.__name__.replace('Bootstrap', '').lower() - def get_variables(self): # type: () -> t.Dict[str, str] - """The variables to template in the boostrapping script.""" + def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] + """The variables to template in the bootstrapping script.""" return dict( bootstrap_type=self.bootstrap_type, controller='yes' if self.controller else '', @@ -65,8 +65,8 @@ class Bootstrap: @dataclasses.dataclass class BootstrapDocker(Bootstrap): """Bootstrap docker instances.""" - def get_variables(self): # type: () -> t.Dict[str, str] - """The variables to template in the boostrapping script.""" + def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] + """The variables to template in the bootstrapping script.""" variables = super().get_variables() 
variables.update( @@ -83,8 +83,8 @@ class BootstrapRemote(Bootstrap): platform: str platform_version: str - def get_variables(self): # type: () -> t.Dict[str, str] - """The variables to template in the boostrapping script.""" + def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] + """The variables to template in the bootstrapping script.""" variables = super().get_variables() variables.update( diff --git a/test/lib/ansible_test/_internal/cgroup.py b/test/lib/ansible_test/_internal/cgroup.py new file mode 100644 index 0000000000..52779599fc --- /dev/null +++ b/test/lib/ansible_test/_internal/cgroup.py @@ -0,0 +1,110 @@ +"""Linux control group constants, classes and utilities.""" +from __future__ import annotations + +import codecs +import dataclasses +import pathlib +import re + + +class CGroupPath: + """Linux cgroup path constants.""" + ROOT = '/sys/fs/cgroup' + SYSTEMD = '/sys/fs/cgroup/systemd' + SYSTEMD_RELEASE_AGENT = '/sys/fs/cgroup/systemd/release_agent' + + +class MountType: + """Linux filesystem mount type constants.""" + TMPFS = 'tmpfs' + CGROUP_V1 = 'cgroup' + CGROUP_V2 = 'cgroup2' + + +@dataclasses.dataclass(frozen=True) +class CGroupEntry: + """A single cgroup entry parsed from '/proc/{pid}/cgroup' in the proc filesystem.""" + id: int + subsystem: str + path: pathlib.PurePosixPath + + @property + def root_path(self): + """The root path for this cgroup subsystem.""" + return pathlib.PurePosixPath(CGroupPath.ROOT, self.subsystem) + + @property + def full_path(self) -> pathlib.PurePosixPath: + """The full path for this cgroup subsystem.""" + return pathlib.PurePosixPath(self.root_path, str(self.path).lstrip('/')) + + @classmethod + def parse(cls, value: str) -> CGroupEntry: + """Parse the given cgroup line from the proc filesystem and return a cgroup entry.""" + cid, subsystem, path = value.split(':') + + return cls( + id=int(cid), + subsystem=re.sub('^name=', '', subsystem), + path=pathlib.PurePosixPath(path) + ) + + @classmethod 
+ def loads(cls, value: str) -> tuple[CGroupEntry, ...]: + """Parse the given output from the proc filesystem and return a tuple of cgroup entries.""" + return tuple(cls.parse(line) for line in value.splitlines()) + + +@dataclasses.dataclass(frozen=True) +class MountEntry: + """A single mount info entry parsed from '/proc/{pid}/mountinfo' in the proc filesystem.""" + mount_id: int + parent_id: int + device_major: int + device_minor: int + root: pathlib.PurePosixPath + path: pathlib.PurePosixPath + options: tuple[str, ...] + fields: tuple[str, ...] + type: str + source: pathlib.PurePosixPath + super_options: tuple[str, ...] + + @classmethod + def parse(cls, value: str) -> MountEntry: + """Parse the given mount info line from the proc filesystem and return a mount entry.""" + # See: https://man7.org/linux/man-pages/man5/proc.5.html + # See: https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L135 + mount_id, parent_id, device_major_minor, root, path, options, *remainder = value.split(' ') + fields = remainder[:-4] + separator, mtype, source, super_options = remainder[-4:] + + assert separator == '-' + + device_major, device_minor = device_major_minor.split(':') + + return cls( + mount_id=int(mount_id), + parent_id=int(parent_id), + device_major=int(device_major), + device_minor=int(device_minor), + root=_decode_path(root), + path=_decode_path(path), + options=tuple(options.split(',')), + fields=tuple(fields), + type=mtype, + source=_decode_path(source), + super_options=tuple(super_options.split(',')), + ) + + @classmethod + def loads(cls, value: str) -> tuple[MountEntry, ...]: + """Parse the given output from the proc filesystem and return a tuple of mount info entries.""" + return tuple(cls.parse(line) for line in value.splitlines()) + + +def _decode_path(value: str) -> pathlib.PurePosixPath: + """Decode and return a path which may contain octal escape sequences.""" + # See: 
https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L150 + path = re.sub(r'(\\[0-7]{3})', lambda m: codecs.decode(m.group(0).encode('ascii'), 'unicode_escape'), value) + return pathlib.PurePosixPath(path) diff --git a/test/lib/ansible_test/_internal/ci/__init__.py b/test/lib/ansible_test/_internal/ci/__init__.py index db5ca501f4..3d0f79e83a 100644 --- a/test/lib/ansible_test/_internal/ci/__init__.py +++ b/test/lib/ansible_test/_internal/ci/__init__.py @@ -114,7 +114,7 @@ class AuthHelper(metaclass=abc.ABCMeta): def initialize_private_key(self): # type: () -> str """ Initialize and publish a new key pair (if needed) and return the private key. - The private key is cached across ansible-test invocations so it is only generated and published once per CI job. + The private key is cached across ansible-test invocations, so it is only generated and published once per CI job. """ path = os.path.expanduser('~/.ansible-core-ci-private.key') @@ -166,14 +166,12 @@ class CryptographyAuthHelper(AuthHelper, metaclass=abc.ABCMeta): private_key = ec.generate_private_key(ec.SECP384R1(), default_backend()) public_key = private_key.public_key() - # noinspection PyUnresolvedReferences - private_key_pem = to_text(private_key.private_bytes( + private_key_pem = to_text(private_key.private_bytes( # type: ignore[attr-defined] # documented method, but missing from type stubs encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), )) - # noinspection PyTypeChecker public_key_pem = to_text(public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo, diff --git a/test/lib/ansible_test/_internal/classification/__init__.py b/test/lib/ansible_test/_internal/classification/__init__.py index 532fa680e8..c599d36edf 100644 --- a/test/lib/ansible_test/_internal/classification/__init__.py +++ 
b/test/lib/ansible_test/_internal/classification/__init__.py @@ -15,6 +15,7 @@ from ..target import ( walk_sanity_targets, load_integration_prefixes, analyze_integration_target_dependencies, + IntegrationTarget, ) from ..util import ( @@ -63,14 +64,14 @@ def categorize_changes(args, paths, verbose_command=None): # type: (TestConfig, 'integration': set(), 'windows-integration': set(), 'network-integration': set(), - } + } # type: t.Dict[str, t.Set[str]] focused_commands = collections.defaultdict(set) - deleted_paths = set() - original_paths = set() - additional_paths = set() - no_integration_paths = set() + deleted_paths = set() # type: t.Set[str] + original_paths = set() # type: t.Set[str] + additional_paths = set() # type: t.Set[str] + no_integration_paths = set() # type: t.Set[str] for path in paths: if not os.path.exists(path): @@ -110,7 +111,7 @@ def categorize_changes(args, paths, verbose_command=None): # type: (TestConfig, tests = all_tests(args) # not categorized, run all tests display.warning('Path not categorized: %s' % path) else: - focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths + focused_target = bool(tests.pop(FOCUSED_TARGET, None)) and path in original_paths tests = dict((key, value) for key, value in tests.items() if value) @@ -155,18 +156,18 @@ def categorize_changes(args, paths, verbose_command=None): # type: (TestConfig, if any(target == 'all' for target in targets): commands[command] = {'all'} - commands = dict((c, sorted(targets)) for c, targets in commands.items() if targets) + sorted_commands = dict((c, sorted(targets)) for c, targets in commands.items() if targets) focused_commands = dict((c, sorted(targets)) for c, targets in focused_commands.items()) - for command, targets in commands.items(): + for command, targets in sorted_commands.items(): if targets == ['all']: - commands[command] = [] # changes require testing all targets, do not filter targets + sorted_commands[command] = [] # changes require testing all 
targets, do not filter targets changes = ChangeDescription() changes.command = verbose_command changes.changed_paths = sorted(original_paths) changes.deleted_paths = sorted(deleted_paths) - changes.regular_command_targets = commands + changes.regular_command_targets = sorted_commands changes.focused_command_targets = focused_commands changes.no_integration_paths = sorted(no_integration_paths) @@ -205,11 +206,11 @@ class PathMapper: self.prefixes = load_integration_prefixes() self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets) - self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed - self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed - self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed + self.python_module_utils_imports = {} # type: t.Dict[str, t.Set[str]] # populated on first use to reduce overhead when not needed + self.powershell_module_utils_imports = {} # type: t.Dict[str, t.Set[str]] # populated on first use to reduce overhead when not needed + self.csharp_module_utils_imports = {} # type: t.Dict[str, t.Set[str]] # populated on first use to reduce overhead when not needed - self.paths_to_dependent_targets = {} + self.paths_to_dependent_targets = {} # type: t.Dict[str, t.Set[IntegrationTarget]] for target in self.integration_targets: for path in target.needs_file: @@ -341,7 +342,7 @@ class PathMapper: filename = os.path.basename(path) name, ext = os.path.splitext(filename) - minimal = {} + minimal = {} # type: t.Dict[str, str] if os.path.sep not in path: if filename in ( @@ -372,7 +373,7 @@ class PathMapper: 'integration': target.name if 'posix/' in target.aliases else None, 'windows-integration': target.name if 'windows/' in target.aliases else None, 'network-integration': target.name if 'network/' in target.aliases else None, - FOCUSED_TARGET: True, + FOCUSED_TARGET: 
target.name, } if is_subdir(path, data_context().content.integration_path): @@ -430,7 +431,7 @@ class PathMapper: 'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None, 'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None, 'network-integration': self.network_integration_by_module.get(module_name), - FOCUSED_TARGET: True, + FOCUSED_TARGET: module_name, } return minimal @@ -582,7 +583,7 @@ class PathMapper: 'windows-integration': target.name if target and 'windows/' in target.aliases else None, 'network-integration': target.name if target and 'network/' in target.aliases else None, 'units': units_path, - FOCUSED_TARGET: target is not None, + FOCUSED_TARGET: target.name if target else None, } if is_subdir(path, data_context().content.plugin_paths['filter']): @@ -630,7 +631,7 @@ class PathMapper: filename = os.path.basename(path) dummy, ext = os.path.splitext(filename) - minimal = {} + minimal = {} # type: t.Dict[str, str] if path.startswith('changelogs/'): return minimal @@ -674,7 +675,7 @@ class PathMapper: filename = os.path.basename(path) name, ext = os.path.splitext(filename) - minimal = {} + minimal = {} # type: t.Dict[str, str] if path.startswith('bin/'): return all_tests(self.args) # broad impact, run all tests @@ -721,7 +722,6 @@ class PathMapper: if path.startswith('test/lib/ansible_test/config/'): if name.startswith('cloud-config-'): - # noinspection PyTypeChecker cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0] if cloud_target in self.integration_targets_by_alias: @@ -746,13 +746,13 @@ class PathMapper: if path.startswith('test/lib/ansible_test/_internal/commands/sanity/'): return { 'sanity': 'all', # test infrastructure, run all sanity checks - 'integration': 'ansible-test', # run ansible-test self tests + 'integration': 'ansible-test/', # run ansible-test self tests } if path.startswith('test/lib/ansible_test/_internal/commands/units/'): return { 
'units': 'all', # test infrastructure, run all unit tests - 'integration': 'ansible-test', # run ansible-test self tests + 'integration': 'ansible-test/', # run ansible-test self tests } if path.startswith('test/lib/ansible_test/_data/requirements/'): @@ -776,13 +776,13 @@ class PathMapper: if path.startswith('test/lib/ansible_test/_util/controller/sanity/') or path.startswith('test/lib/ansible_test/_util/target/sanity/'): return { 'sanity': 'all', # test infrastructure, run all sanity checks - 'integration': 'ansible-test', # run ansible-test self tests + 'integration': 'ansible-test/', # run ansible-test self tests } if path.startswith('test/lib/ansible_test/_util/target/pytest/'): return { 'units': 'all', # test infrastructure, run all unit tests - 'integration': 'ansible-test', # run ansible-test self tests + 'integration': 'ansible-test/', # run ansible-test self tests } if path.startswith('test/lib/'): diff --git a/test/lib/ansible_test/_internal/classification/powershell.py b/test/lib/ansible_test/_internal/classification/powershell.py index 72715de00b..bc73b7487c 100644 --- a/test/lib/ansible_test/_internal/classification/powershell.py +++ b/test/lib/ansible_test/_internal/classification/powershell.py @@ -83,7 +83,7 @@ def extract_powershell_module_utils_imports(path, module_utils): # type: (str, for line in lines: line_number += 1 - match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line) + match = re.search(r'(?i)^#\s*(?:requires\s+-modules?|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line) if not match: continue diff --git a/test/lib/ansible_test/_internal/classification/python.py b/test/lib/ansible_test/_internal/classification/python.py index ac2d99a756..d81b459b24 100644 --- a/test/lib/ansible_test/_internal/classification/python.py +++ b/test/lib/ansible_test/_internal/classification/python.py @@ -236,7 +236,7 @@ class 
ModuleUtilFinder(ast.NodeVisitor): def __init__(self, path, module_utils): # type: (str, t.Set[str]) -> None self.path = path self.module_utils = module_utils - self.imports = set() + self.imports = set() # type: t.Set[str] # implicitly import parent package @@ -277,7 +277,6 @@ class ModuleUtilFinder(ast.NodeVisitor): # While that will usually be true, there are exceptions which will result in this resolution being incorrect. self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path)) - # noinspection PyPep8Naming # pylint: disable=locally-disabled, invalid-name def visit_Import(self, node): # type: (ast.Import) -> None """Visit an import node.""" @@ -287,7 +286,6 @@ class ModuleUtilFinder(ast.NodeVisitor): # import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE] self.add_imports([alias.name for alias in node.names], node.lineno) - # noinspection PyPep8Naming # pylint: disable=locally-disabled, invalid-name def visit_ImportFrom(self, node): # type: (ast.ImportFrom) -> None """Visit an import from node.""" diff --git a/test/lib/ansible_test/_internal/cli/__init__.py b/test/lib/ansible_test/_internal/cli/__init__.py index 21c45b6e32..dad678beb3 100644 --- a/test/lib/ansible_test/_internal/cli/__init__.py +++ b/test/lib/ansible_test/_internal/cli/__init__.py @@ -13,23 +13,26 @@ from .commands import ( do_commands, ) +from .epilog import ( + get_epilog, +) from .compat import ( HostSettings, convert_legacy_args, ) +from ..util import ( + get_ansible_version, +) + def parse_args(): # type: () -> argparse.Namespace """Parse command line arguments.""" completer = CompositeActionCompletionFinder() - if completer.enabled: - epilog = 'Tab completion available using the "argcomplete" python package.' - else: - epilog = 'Install the "argcomplete" python package to enable tab completion.' 
- - parser = argparse.ArgumentParser(epilog=epilog) + parser = argparse.ArgumentParser(prog='ansible-test', epilog=get_epilog(completer), formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('--version', action='version', version=f'%(prog)s version {get_ansible_version()}') do_commands(parser, completer) diff --git a/test/lib/ansible_test/_internal/cli/argparsing/__init__.py b/test/lib/ansible_test/_internal/cli/argparsing/__init__.py index 8a087ebf8f..66dfc4e4a0 100644 --- a/test/lib/ansible_test/_internal/cli/argparsing/__init__.py +++ b/test/lib/ansible_test/_internal/cli/argparsing/__init__.py @@ -37,7 +37,7 @@ class RegisteredCompletionFinder(OptionCompletionFinder): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.registered_completions = None # type: t.Optional[str] + self.registered_completions = None # type: t.Optional[t.List[str]] def completer( self, @@ -88,20 +88,18 @@ class CompositeAction(argparse.Action, metaclass=abc.ABCMeta): """Base class for actions that parse composite arguments.""" documentation_state = {} # type: t.Dict[t.Type[CompositeAction], DocumentationState] - # noinspection PyUnusedLocal def __init__( self, *args, - dest, # type: str **kwargs, ): - del dest - self.definition = self.create_parser() self.documentation_state[type(self)] = documentation_state = DocumentationState() self.definition.document(documentation_state) - super().__init__(*args, dest=self.definition.dest, **kwargs) + kwargs.update(dest=self.definition.dest) + + super().__init__(*args, **kwargs) register_safe_action(type(self)) @@ -139,10 +137,12 @@ class CompositeActionCompletionFinder(RegisteredCompletionFinder): def get_completions( self, prefix, # type: str - action, # type: CompositeAction + action, # type: argparse.Action parsed_args, # type: argparse.Namespace ): # type: (...) 
-> t.List[str] """Return a list of completions appropriate for the given prefix and action, taking into account the arguments that have already been parsed.""" + assert isinstance(action, CompositeAction) + state = ParserState( mode=ParserMode.LIST if self.list_mode else ParserMode.COMPLETE, remainder=prefix, @@ -238,6 +238,8 @@ def complete( """Perform argument completion using the given completer and return the completion result.""" value = state.remainder + answer: Completion + try: completer.parse(state) raise ParserError('completion expected') diff --git a/test/lib/ansible_test/_internal/cli/argparsing/actions.py b/test/lib/ansible_test/_internal/cli/argparsing/actions.py index c2b573e639..e3d0fd1c75 100644 --- a/test/lib/ansible_test/_internal/cli/argparsing/actions.py +++ b/test/lib/ansible_test/_internal/cli/argparsing/actions.py @@ -7,8 +7,8 @@ import typing as t class EnumAction(argparse.Action): - """Parse an enum using the lowercases enum names.""" - def __init__(self, **kwargs): # type: (t.Dict[str, t.Any]) -> None + """Parse an enum using the lowercase enum names.""" + def __init__(self, **kwargs: t.Any) -> None: self.enum_type = kwargs.pop('type', None) # type: t.Type[enum.Enum] kwargs.setdefault('choices', tuple(e.name.lower() for e in self.enum_type)) super().__init__(**kwargs) diff --git a/test/lib/ansible_test/_internal/cli/argparsing/parsers.py b/test/lib/ansible_test/_internal/cli/argparsing/parsers.py index fe80a68e5d..dcff978c9a 100644 --- a/test/lib/ansible_test/_internal/cli/argparsing/parsers.py +++ b/test/lib/ansible_test/_internal/cli/argparsing/parsers.py @@ -173,7 +173,7 @@ class ParserState: self.namespaces.append(namespace) @contextlib.contextmanager - def delimit(self, delimiters, required=True): # type: (str, bool) -> t.ContextManager[ParserBoundary] + def delimit(self, delimiters, required=True): # type: (str, bool) -> t.Iterator[ParserBoundary] """Context manager for delimiting parsing of input.""" boundary = 
ParserBoundary(delimiters=delimiters, required=required) @@ -286,6 +286,19 @@ class ChoicesParser(DynamicChoicesParser): return '|'.join(self.choices) +class EnumValueChoicesParser(ChoicesParser): + """Composite argument parser which relies on a static list of choices derived from the values of an enum.""" + def __init__(self, enum_type: t.Type[enum.Enum], conditions: MatchConditions = MatchConditions.CHOICE) -> None: + self.enum_type = enum_type + + super().__init__(choices=[str(item.value) for item in enum_type], conditions=conditions) + + def parse(self, state: ParserState) -> t.Any: + """Parse the input from the given state and return the result.""" + value = super().parse(state) + return self.enum_type(value) + + class IntegerParser(DynamicChoicesParser): """Composite argument parser for integers.""" PATTERN = re.compile('^[1-9][0-9]*$') @@ -394,7 +407,7 @@ class FileParser(Parser): else: path = '' - with state.delimit(PATH_DELIMITER, required=False) as boundary: + with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary while boundary.ready: directory = path or '.' 
@@ -420,7 +433,7 @@ class AbsolutePathParser(Parser): """Parse the input from the given state and return the result.""" path = '' - with state.delimit(PATH_DELIMITER, required=False) as boundary: + with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary while boundary.ready: if path: path += AnyParser(nothing=True).parse(state) @@ -506,7 +519,7 @@ class KeyValueParser(Parser, metaclass=abc.ABCMeta): parsers = self.get_parsers(state) keys = list(parsers) - with state.delimit(PAIR_DELIMITER, required=False) as pair: + with state.delimit(PAIR_DELIMITER, required=False) as pair: # type: ParserBoundary while pair.ready: with state.delimit(ASSIGNMENT_DELIMITER): key = ChoicesParser(keys).parse(state) @@ -528,7 +541,7 @@ class PairParser(Parser, metaclass=abc.ABCMeta): state.set_namespace(namespace) - with state.delimit(self.delimiter, self.required) as boundary: + with state.delimit(self.delimiter, self.required) as boundary: # type: ParserBoundary choice = self.get_left_parser(state).parse(state) if boundary.match: diff --git a/test/lib/ansible_test/_internal/cli/commands/__init__.py b/test/lib/ansible_test/_internal/cli/commands/__init__.py index 5cd37f4f91..81bb465372 100644 --- a/test/lib/ansible_test/_internal/cli/commands/__init__.py +++ b/test/lib/ansible_test/_internal/cli/commands/__init__.py @@ -11,6 +11,7 @@ from ...util import ( from ..completers import ( complete_target, + register_completer, ) from ..environments import ( @@ -110,33 +111,33 @@ def do_commands( testing = test.add_argument_group(title='common testing arguments') - testing.add_argument( + register_completer(testing.add_argument( 'include', metavar='TARGET', nargs='*', help='test the specified target', - ).completer = functools.partial(complete_target, completer) + ), functools.partial(complete_target, completer)) - testing.add_argument( + register_completer(testing.add_argument( '--include', metavar='TARGET', action='append', help='include the specified target', 
- ).completer = functools.partial(complete_target, completer) + ), functools.partial(complete_target, completer)) - testing.add_argument( + register_completer(testing.add_argument( '--exclude', metavar='TARGET', action='append', help='exclude the specified target', - ).completer = functools.partial(complete_target, completer) + ), functools.partial(complete_target, completer)) - testing.add_argument( + register_completer(testing.add_argument( '--require', metavar='TARGET', action='append', help='require the specified target', - ).completer = functools.partial(complete_target, completer) + ), functools.partial(complete_target, completer)) testing.add_argument( '--coverage', diff --git a/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py b/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py index f79fb1cfc2..7ef28919a4 100644 --- a/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py +++ b/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py @@ -5,6 +5,7 @@ import argparse from ...completers import ( complete_target, + register_completer, ) from ...environments import ( @@ -43,12 +44,12 @@ def do_integration( def add_integration_common( parser, # type: argparse.ArgumentParser ): - """Add common integration argumetns.""" - parser.add_argument( + """Add common integration arguments.""" + register_completer(parser.add_argument( '--start-at', metavar='TARGET', help='start at the specified target', - ).completer = complete_target + ), complete_target) parser.add_argument( '--start-at-task', diff --git a/test/lib/ansible_test/_internal/cli/commands/integration/network.py b/test/lib/ansible_test/_internal/cli/commands/integration/network.py index d070afda9b..86729195b0 100644 --- a/test/lib/ansible_test/_internal/cli/commands/integration/network.py +++ b/test/lib/ansible_test/_internal/cli/commands/integration/network.py @@ -28,6 +28,10 @@ from ...environments import ( add_environments, ) +from ...completers 
import ( + register_completer, +) + def do_network_integration( subparsers, @@ -51,16 +55,16 @@ def do_network_integration( add_integration_common(network_integration) - network_integration.add_argument( + register_completer(network_integration.add_argument( '--testcase', metavar='TESTCASE', help='limit a test to a specified testcase', - ).completer = complete_network_testcase + ), complete_network_testcase) add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NETWORK_INTEGRATION) # network-integration -def complete_network_testcase(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) -> t.List[str] +def complete_network_testcase(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of test cases matching the given prefix if only one target was parsed from the command line, otherwise return an empty list.""" testcases = [] diff --git a/test/lib/ansible_test/_internal/cli/commands/shell.py b/test/lib/ansible_test/_internal/cli/commands/shell.py index 301ff70e90..7d52b39e05 100644 --- a/test/lib/ansible_test/_internal/cli/commands/shell.py +++ b/test/lib/ansible_test/_internal/cli/commands/shell.py @@ -39,9 +39,21 @@ def do_shell( shell = parser.add_argument_group(title='shell arguments') shell.add_argument( + 'cmd', + nargs='*', + help='run the specified command', + ) + + shell.add_argument( '--raw', action='store_true', help='direct to shell with no setup', ) + shell.add_argument( + '--export', + metavar='PATH', + help='export inventory instead of opening a shell', + ) + add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.SHELL) # shell diff --git a/test/lib/ansible_test/_internal/cli/compat.py b/test/lib/ansible_test/_internal/cli/compat.py index 2090aac711..0a23c2306f 100644 --- a/test/lib/ansible_test/_internal/cli/compat.py +++ b/test/lib/ansible_test/_internal/cli/compat.py @@ -55,7 +55,7 @@ from ..data import ( ) -def filter_python(version, versions): # type: (t.Optional[str], 
t.Optional[t.List[str]]) -> t.Optional[str] +def filter_python(version, versions): # type: (t.Optional[str], t.Optional[t.Sequence[str]]) -> t.Optional[str] """If a Python version is given and is in the given version list, return that Python version, otherwise return None.""" return version if version in versions else None @@ -115,6 +115,7 @@ class LegacyHostOptions: venv_system_site_packages: t.Optional[bool] = None remote: t.Optional[str] = None remote_provider: t.Optional[str] = None + remote_arch: t.Optional[str] = None docker: t.Optional[str] = None docker_privileged: t.Optional[bool] = None docker_seccomp: t.Optional[str] = None @@ -201,6 +202,9 @@ def convert_legacy_args( '--controller', '--target', '--target-python', + '--target-posix', + '--target-windows', + '--target-network', ] used_old_options = old_options.get_options_used() @@ -237,8 +241,8 @@ def convert_legacy_args( args.targets = targets if used_default_pythons: - targets = t.cast(t.List[ControllerConfig], targets) - skipped_python_versions = sorted_versions(list(set(SUPPORTED_PYTHON_VERSIONS) - {target.python.version for target in targets})) + control_targets = t.cast(t.List[ControllerConfig], targets) + skipped_python_versions = sorted_versions(list(set(SUPPORTED_PYTHON_VERSIONS) - {target.python.version for target in control_targets})) else: skipped_python_versions = [] @@ -260,10 +264,12 @@ def controller_targets( mode, # type: TargetMode options, # type: LegacyHostOptions controller, # type: ControllerHostConfig -): # type: (...) -> t.List[ControllerConfig] +): # type: (...) -> t.List[HostConfig] """Return the configuration for controller targets.""" python = native_python(options) + targets: t.List[HostConfig] + if python: targets = [ControllerConfig(python=python)] else: @@ -283,7 +289,7 @@ def native_python(options): # type: (LegacyHostOptions) -> t.Optional[NativePyt def get_legacy_host_config( mode, # type: TargetMode options, # type: LegacyHostOptions -): # type: (...) 
-> t.Tuple[HostConfig, t.List[HostConfig], t.Optional[FallbackDetail]] +): # type: (...) -> t.Tuple[ControllerHostConfig, t.List[HostConfig], t.Optional[FallbackDetail]] """ Returns controller and target host configs derived from the provided legacy host options. The goal is to match the original behavior, by using non-split testing whenever possible. @@ -296,6 +302,9 @@ def get_legacy_host_config( controller_fallback = None # type: t.Optional[t.Tuple[str, str, FallbackReason]] + controller: t.Optional[ControllerHostConfig] + targets: t.List[HostConfig] + if options.venv: if controller_python(options.python) or not options.python: controller = OriginConfig(python=VirtualPythonConfig(version=options.python or 'default', system_site_packages=options.venv_system_site_packages)) @@ -304,14 +313,21 @@ def get_legacy_host_config( controller = OriginConfig(python=VirtualPythonConfig(version='default', system_site_packages=options.venv_system_site_packages)) if mode in (TargetMode.SANITY, TargetMode.UNITS): - targets = controller_targets(mode, options, controller) + python = native_python(options) + + if python: + control_targets = [ControllerConfig(python=python)] + else: + control_targets = controller.get_default_targets(HostContext(controller_config=controller)) # Target sanity tests either have no Python requirements or manage their own virtual environments. - # Thus there is no point in setting up virtual environments ahead of time for them. + # Thus, there is no point in setting up virtual environments ahead of time for them. 
if mode == TargetMode.UNITS: targets = [ControllerConfig(python=VirtualPythonConfig(version=target.python.version, path=target.python.path, - system_site_packages=options.venv_system_site_packages)) for target in targets] + system_site_packages=options.venv_system_site_packages)) for target in control_targets] + else: + targets = t.cast(t.List[HostConfig], control_targets) else: targets = [ControllerConfig(python=VirtualPythonConfig(version=options.python or 'default', system_site_packages=options.venv_system_site_packages))] @@ -359,33 +375,34 @@ def get_legacy_host_config( if remote_config.controller_supported: if controller_python(options.python) or not options.python: - controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider) + controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, + arch=options.remote_arch) targets = controller_targets(mode, options, controller) else: controller_fallback = f'remote:{options.remote}', f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON - controller = PosixRemoteConfig(name=options.remote, provider=options.remote_provider) + controller = PosixRemoteConfig(name=options.remote, provider=options.remote_provider, arch=options.remote_arch) targets = controller_targets(mode, options, controller) else: context, reason = f'--remote {options.remote}', FallbackReason.ENVIRONMENT controller = None - targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)] + targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)] elif mode == TargetMode.SHELL and options.remote.startswith('windows/'): if options.python and options.python not in CONTROLLER_PYTHON_VERSIONS: raise ControllerNotSupportedError(f'--python {options.python}') controller = 
OriginConfig(python=native_python(options)) - targets = [WindowsRemoteConfig(name=options.remote, provider=options.remote_provider)] + targets = [WindowsRemoteConfig(name=options.remote, provider=options.remote_provider, arch=options.remote_arch)] else: if not options.python: raise PythonVersionUnspecifiedError(f'--remote {options.remote}') if controller_python(options.python): - controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider) + controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch) targets = controller_targets(mode, options, controller) else: context, reason = f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON controller = None - targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)] + targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)] if not controller: if docker_available(): @@ -443,22 +460,25 @@ def handle_non_posix_targets( """Return a list of non-POSIX targets if the target mode is non-POSIX.""" if mode == TargetMode.WINDOWS_INTEGRATION: if options.windows: - targets = [WindowsRemoteConfig(name=f'windows/{version}', provider=options.remote_provider) for version in options.windows] + targets = [WindowsRemoteConfig(name=f'windows/{version}', provider=options.remote_provider, arch=options.remote_arch) + for version in options.windows] else: targets = [WindowsInventoryConfig(path=options.inventory)] elif mode == TargetMode.NETWORK_INTEGRATION: if options.platform: - targets = [NetworkRemoteConfig(name=platform, provider=options.remote_provider) for platform in options.platform] + network_targets = [NetworkRemoteConfig(name=platform, provider=options.remote_provider, arch=options.remote_arch) for platform in options.platform] for 
platform, collection in options.platform_collection or []: - for entry in targets: + for entry in network_targets: if entry.platform == platform: entry.collection = collection for platform, connection in options.platform_connection or []: - for entry in targets: + for entry in network_targets: if entry.platform == platform: entry.connection = connection + + targets = t.cast(t.List[HostConfig], network_targets) else: targets = [NetworkInventoryConfig(path=options.inventory)] @@ -470,12 +490,14 @@ def default_targets( controller, # type: ControllerHostConfig ): # type: (...) -> t.List[HostConfig] """Return a list of default targets for the given target mode.""" + targets: t.List[HostConfig] + if mode == TargetMode.WINDOWS_INTEGRATION: targets = [WindowsInventoryConfig(path=os.path.abspath(os.path.join(data_context().content.integration_path, 'inventory.winrm')))] elif mode == TargetMode.NETWORK_INTEGRATION: targets = [NetworkInventoryConfig(path=os.path.abspath(os.path.join(data_context().content.integration_path, 'inventory.networking')))] elif mode.multiple_pythons: - targets = controller.get_default_targets(HostContext(controller_config=controller)) + targets = t.cast(t.List[HostConfig], controller.get_default_targets(HostContext(controller_config=controller))) else: targets = [ControllerConfig()] diff --git a/test/lib/ansible_test/_internal/cli/completers.py b/test/lib/ansible_test/_internal/cli/completers.py index a4b9c04f4e..278b106251 100644 --- a/test/lib/ansible_test/_internal/cli/completers.py +++ b/test/lib/ansible_test/_internal/cli/completers.py @@ -13,14 +13,19 @@ from .argparsing.argcompletion import ( ) -def complete_target(completer, prefix, parsed_args, **_): # type: (OptionCompletionFinder, str, argparse.Namespace, ...) 
-> t.List[str] +def complete_target(completer: OptionCompletionFinder, prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Perform completion for the targets configured for the command being parsed.""" matches = find_target_completion(parsed_args.targets_func, prefix, completer.list_mode) completer.disable_completion_mangling = completer.list_mode and len(matches) > 1 return matches -def complete_choices(choices, prefix, **_): # type: (t.List[str], str, ...) -> t.List[str] +def complete_choices(choices: t.List[str], prefix: str, **_) -> t.List[str]: """Perform completion using the provided choices.""" matches = [choice for choice in choices if choice.startswith(prefix)] return matches + + +def register_completer(action: argparse.Action, completer) -> None: + """Register the given completer with the specified action.""" + action.completer = completer # type: ignore[attr-defined] # intentionally using an attribute that does not exist diff --git a/test/lib/ansible_test/_internal/cli/environments.py b/test/lib/ansible_test/_internal/cli/environments.py index 3c0230ca1c..1495f8efcd 100644 --- a/test/lib/ansible_test/_internal/cli/environments.py +++ b/test/lib/ansible_test/_internal/cli/environments.py @@ -13,6 +13,10 @@ from ..constants import ( SUPPORTED_PYTHON_VERSIONS, ) +from ..util import ( + REMOTE_ARCHITECTURES, +) + from ..completion import ( docker_completion, network_completion, @@ -53,12 +57,17 @@ from ..config import ( from .completers import ( complete_choices, + register_completer, ) from .converters import ( key_value_type, ) +from .epilog import ( + get_epilog, +) + from ..ci import ( get_ci_provider, ) @@ -98,6 +107,8 @@ def add_environments( if not get_ci_provider().supports_core_ci_auth(): sections.append('Remote provisioning options have been hidden since no Ansible Core CI API key was found.') + sections.append(get_epilog(completer)) + parser.formatter_class = argparse.RawDescriptionHelpFormatter parser.epilog = 
'\n\n'.join(sections) @@ -169,40 +180,40 @@ def add_composite_environment_options( if controller_mode == ControllerMode.NO_DELEGATION: composite_parser.set_defaults(controller=None) else: - composite_parser.add_argument( + register_completer(composite_parser.add_argument( '--controller', metavar='OPT', action=register_action_type(DelegatedControllerAction if controller_mode == ControllerMode.DELEGATED else OriginControllerAction), help='configuration for the controller', - ).completer = completer.completer + ), completer.completer) if target_mode == TargetMode.NO_TARGETS: composite_parser.set_defaults(targets=[]) elif target_mode == TargetMode.SHELL: group = composite_parser.add_mutually_exclusive_group() - group.add_argument( + register_completer(group.add_argument( '--target-posix', metavar='OPT', action=register_action_type(PosixSshTargetAction), help='configuration for the target', - ).completer = completer.completer + ), completer.completer) suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS - group.add_argument( + register_completer(group.add_argument( '--target-windows', metavar='OPT', action=WindowsSshTargetAction if suppress else register_action_type(WindowsSshTargetAction), help=suppress or 'configuration for the target', - ).completer = completer.completer + ), completer.completer) - group.add_argument( + register_completer(group.add_argument( '--target-network', metavar='OPT', action=NetworkSshTargetAction if suppress else register_action_type(NetworkSshTargetAction), help=suppress or 'configuration for the target', - ).completer = completer.completer + ), completer.completer) else: if target_mode.multiple_pythons: target_option = '--target-python' @@ -224,12 +235,12 @@ def add_composite_environment_options( target_action = target_actions[target_mode] - composite_parser.add_argument( + register_completer(composite_parser.add_argument( target_option, metavar='OPT', action=register_action_type(target_action), 
help=target_help, - ).completer = completer.completer + ), completer.completer) return action_types @@ -240,9 +251,8 @@ def add_legacy_environment_options( target_mode, # type: TargetMode ): """Add legacy options for controlling the test environment.""" - # noinspection PyTypeChecker - environment = parser.add_argument_group( - title='environment arguments (mutually exclusive with "composite environment arguments" below)') # type: argparse.ArgumentParser + environment: argparse.ArgumentParser = parser.add_argument_group( # type: ignore[assignment] # real type private + title='environment arguments (mutually exclusive with "composite environment arguments" below)') add_environments_python(environment, target_mode) add_environments_host(environment, controller_mode, target_mode) @@ -253,6 +263,8 @@ def add_environments_python( target_mode, # type: TargetMode ): # type: (...) -> None """Add environment arguments to control the Python version(s) used.""" + python_versions: t.Tuple[str, ...] + if target_mode.has_python: python_versions = SUPPORTED_PYTHON_VERSIONS else: @@ -278,8 +290,7 @@ def add_environments_host( target_mode # type: TargetMode ): # type: (...) -> None """Add environment arguments for the given host and argument modes.""" - # noinspection PyTypeChecker - environments_exclusive_group = environments_parser.add_mutually_exclusive_group() # type: argparse.ArgumentParser + environments_exclusive_group: argparse.ArgumentParser = environments_parser.add_mutually_exclusive_group() # type: ignore[assignment] # real type private add_environment_local(environments_exclusive_group) add_environment_venv(environments_exclusive_group, environments_parser) @@ -299,28 +310,28 @@ def add_environment_network( environments_parser, # type: argparse.ArgumentParser ): # type: (...) 
-> None """Add environment arguments for running on a windows host.""" - environments_parser.add_argument( + register_completer(environments_parser.add_argument( '--platform', metavar='PLATFORM', action='append', help='network platform/version', - ).completer = complete_network_platform + ), complete_network_platform) - environments_parser.add_argument( + register_completer(environments_parser.add_argument( '--platform-collection', type=key_value_type, metavar='PLATFORM=COLLECTION', action='append', help='collection used to test platform', - ).completer = complete_network_platform_collection + ), complete_network_platform_collection) - environments_parser.add_argument( + register_completer(environments_parser.add_argument( '--platform-connection', type=key_value_type, metavar='PLATFORM=CONNECTION', action='append', help='connection used to test platform', - ).completer = complete_network_platform_connection + ), complete_network_platform_connection) environments_parser.add_argument( '--inventory', @@ -333,12 +344,12 @@ def add_environment_windows( environments_parser, # type: argparse.ArgumentParser ): # type: (...) -> None """Add environment arguments for running on a windows host.""" - environments_parser.add_argument( + register_completer(environments_parser.add_argument( '--windows', metavar='VERSION', action='append', help='windows version', - ).completer = complete_windows + ), complete_windows) environments_parser.add_argument( '--inventory', @@ -386,6 +397,8 @@ def add_global_docker( docker_network=None, docker_terminate=None, prime_containers=False, + dev_systemd_debug=False, + dev_probe_cgroups=None, ) return @@ -417,6 +430,24 @@ def add_global_docker( help='download containers without running tests', ) + # Docker support isn't related to ansible-core-ci. + # However, ansible-core-ci support is a reasonable indicator that the user may need the `--dev-*` options. 
+ suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS + + parser.add_argument( + '--dev-systemd-debug', + action='store_true', + help=suppress or 'enable systemd debugging in containers', + ) + + parser.add_argument( + '--dev-probe-cgroups', + metavar='DIR', + nargs='?', + const='', + help=suppress or 'probe container cgroups, with optional log dir', + ) + def add_environment_docker( exclusive_parser, # type: argparse.ArgumentParser @@ -429,13 +460,13 @@ def add_environment_docker( else: docker_images = sorted(filter_completion(docker_completion(), controller_only=True)) - exclusive_parser.add_argument( + register_completer(exclusive_parser.add_argument( '--docker', metavar='IMAGE', nargs='?', const='default', help='run from a docker container', - ).completer = functools.partial(complete_choices, docker_images) + ), functools.partial(complete_choices, docker_images)) environments_parser.add_argument( '--docker-privileged', @@ -474,12 +505,12 @@ def add_global_remote( suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS - parser.add_argument( + register_completer(parser.add_argument( '--remote-stage', metavar='STAGE', default='prod', help=suppress or 'remote stage to use: prod, dev', - ).completer = complete_remote_stage + ), complete_remote_stage) parser.add_argument( '--remote-endpoint', @@ -512,11 +543,11 @@ def add_environment_remote( suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS - exclusive_parser.add_argument( + register_completer(exclusive_parser.add_argument( '--remote', metavar='NAME', help=suppress or 'run from a remote instance', - ).completer = functools.partial(complete_choices, remote_platforms) + ), functools.partial(complete_choices, remote_platforms)) environments_parser.add_argument( '--remote-provider', @@ -525,25 +556,32 @@ def add_environment_remote( help=suppress or 'remote provider to use: %(choices)s', ) + environments_parser.add_argument( + 
'--remote-arch', + metavar='ARCH', + choices=REMOTE_ARCHITECTURES, + help=suppress or 'remote arch to use: %(choices)s', + ) + -def complete_remote_stage(prefix, **_): # type: (str, ...) -> t.List[str] +def complete_remote_stage(prefix: str, **_) -> t.List[str]: """Return a list of supported stages matching the given prefix.""" return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)] -def complete_windows(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) -> t.List[str] +def complete_windows(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of supported Windows versions matching the given prefix, excluding versions already parsed from the command line.""" return [i for i in get_windows_version_choices() if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)] -def complete_network_platform(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) -> t.List[str] +def complete_network_platform(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of supported network platforms matching the given prefix, excluding platforms already parsed from the command line.""" images = sorted(filter_completion(network_completion())) return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)] -def complete_network_platform_collection(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) 
-> t.List[str] +def complete_network_platform_collection(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of supported network platforms matching the given prefix, excluding collection platforms already parsed from the command line.""" left = prefix.split('=')[0] images = sorted(set(image.platform for image in filter_completion(network_completion()).values())) @@ -551,7 +589,7 @@ def complete_network_platform_collection(prefix, parsed_args, **_): # type: (st return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])] -def complete_network_platform_connection(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) -> t.List[str] +def complete_network_platform_connection(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of supported network platforms matching the given prefix, excluding connection platforms already parsed from the command line.""" left = prefix.split('=')[0] images = sorted(set(image.platform for image in filter_completion(network_completion()).values())) diff --git a/test/lib/ansible_test/_internal/cli/epilog.py b/test/lib/ansible_test/_internal/cli/epilog.py new file mode 100644 index 0000000000..3800ff1c0c --- /dev/null +++ b/test/lib/ansible_test/_internal/cli/epilog.py @@ -0,0 +1,23 @@ +"""Argument parsing epilog generation.""" +from __future__ import annotations + +from .argparsing import ( + CompositeActionCompletionFinder, +) + +from ..data import ( + data_context, +) + + +def get_epilog(completer: CompositeActionCompletionFinder) -> str: + """Generate and return the epilog to use for help output.""" + if completer.enabled: + epilog = 'Tab completion available using the "argcomplete" python package.' + else: + epilog = 'Install the "argcomplete" python package to enable tab completion.' 
+ + if data_context().content.unsupported: + epilog += '\n\n' + data_context().explain_working_directory() + + return epilog diff --git a/test/lib/ansible_test/_internal/cli/parsers/__init__.py b/test/lib/ansible_test/_internal/cli/parsers/__init__.py index 25bac9167b..e870d9f8ca 100644 --- a/test/lib/ansible_test/_internal/cli/parsers/__init__.py +++ b/test/lib/ansible_test/_internal/cli/parsers/__init__.py @@ -73,7 +73,7 @@ class DelegatedControllerParser(ControllerNamespaceParser, TypeParser): """Composite argument parser for the controller when delegation is supported.""" def get_stateless_parsers(self): # type: () -> t.Dict[str, Parser] """Return a dictionary of type names and type parsers.""" - parsers = dict( + parsers: t.Dict[str, Parser] = dict( origin=OriginParser(), docker=DockerParser(controller=True), ) @@ -99,7 +99,7 @@ class PosixTargetParser(TargetNamespaceParser, TypeParser): """Composite argument parser for a POSIX target.""" def get_stateless_parsers(self): # type: () -> t.Dict[str, Parser] """Return a dictionary of type names and type parsers.""" - parsers = dict( + parsers: t.Dict[str, Parser] = dict( controller=ControllerParser(), docker=DockerParser(controller=False), ) @@ -142,7 +142,7 @@ class WindowsTargetParser(TargetsNamespaceParser, TypeParser): def get_internal_parsers(self, targets): # type: (t.List[WindowsConfig]) -> t.Dict[str, Parser] """Return a dictionary of type names and type parsers.""" - parsers = {} + parsers = {} # type: t.Dict[str, Parser] if self.allow_inventory and not targets: parsers.update( @@ -184,7 +184,7 @@ class NetworkTargetParser(TargetsNamespaceParser, TypeParser): def get_internal_parsers(self, targets): # type: (t.List[NetworkConfig]) -> t.Dict[str, Parser] """Return a dictionary of type names and type parsers.""" - parsers = {} + parsers = {} # type: t.Dict[str, Parser] if self.allow_inventory and not targets: parsers.update( diff --git a/test/lib/ansible_test/_internal/cli/parsers/helpers.py 
b/test/lib/ansible_test/_internal/cli/parsers/helpers.py index 8dc7a65c58..03f3cb79bc 100644 --- a/test/lib/ansible_test/_internal/cli/parsers/helpers.py +++ b/test/lib/ansible_test/_internal/cli/parsers/helpers.py @@ -27,7 +27,7 @@ def get_docker_pythons(name, controller, strict): # type: (str, bool, bool) -> available_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS if not image_config: - return [] if strict else available_pythons + return [] if strict else list(available_pythons) supported_pythons = [python for python in image_config.supported_pythons if python in available_pythons] @@ -40,7 +40,7 @@ def get_remote_pythons(name, controller, strict): # type: (str, bool, bool) -> available_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS if not platform_config: - return [] if strict else available_pythons + return [] if strict else list(available_pythons) supported_pythons = [python for python in platform_config.supported_pythons if python in available_pythons] @@ -54,6 +54,6 @@ def get_controller_pythons(controller_config, strict): # type: (HostConfig, boo elif isinstance(controller_config, PosixRemoteConfig): pythons = get_remote_pythons(controller_config.name, False, strict) else: - pythons = SUPPORTED_PYTHON_VERSIONS + pythons = list(SUPPORTED_PYTHON_VERSIONS) return pythons diff --git a/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py b/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py index b22705f731..820f9c4b1c 100644 --- a/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py +++ b/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py @@ -10,15 +10,29 @@ from ...constants import ( SUPPORTED_PYTHON_VERSIONS, ) +from ...completion import ( + AuditMode, + CGroupVersion, +) + +from ...util import ( + REMOTE_ARCHITECTURES, +) + from ...host_configs import ( OriginConfig, ) +from ...become import ( + SUPPORTED_BECOME_METHODS, +) + from 
..argparsing.parsers import ( AnyParser, BooleanParser, ChoicesParser, DocumentationState, + EnumValueChoicesParser, IntegerParser, KeyValueParser, Parser, @@ -95,6 +109,8 @@ class DockerKeyValueParser(KeyValueParser): return dict( python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default), seccomp=ChoicesParser(SECCOMP_CHOICES), + cgroup=EnumValueChoicesParser(CGroupVersion), + audit=EnumValueChoicesParser(AuditMode), privileged=BooleanParser(), memory=IntegerParser(), ) @@ -108,6 +124,8 @@ class DockerKeyValueParser(KeyValueParser): state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([ f' python={python_parser.document(state)}', f' seccomp={ChoicesParser(SECCOMP_CHOICES).document(state)}', + f' cgroup={EnumValueChoicesParser(CGroupVersion).document(state)}', + f' audit={EnumValueChoicesParser(AuditMode).document(state)}', f' privileged={BooleanParser().document(state)}', f' memory={IntegerParser().document(state)} # bytes', ]) @@ -125,7 +143,9 @@ class PosixRemoteKeyValueParser(KeyValueParser): def get_parsers(self, state): # type: (ParserState) -> t.Dict[str, Parser] """Return a dictionary of key names and value parsers.""" return dict( + become=ChoicesParser(list(SUPPORTED_BECOME_METHODS)), provider=ChoicesParser(REMOTE_PROVIDERS), + arch=ChoicesParser(REMOTE_ARCHITECTURES), python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default), ) @@ -136,7 +156,9 @@ class PosixRemoteKeyValueParser(KeyValueParser): section_name = 'remote options' state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([ + f' become={ChoicesParser(list(SUPPORTED_BECOME_METHODS)).document(state)}', f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}', + f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}', f' python={python_parser.document(state)}', ]) @@ -149,6 +171,7 @@ class 
WindowsRemoteKeyValueParser(KeyValueParser): """Return a dictionary of key names and value parsers.""" return dict( provider=ChoicesParser(REMOTE_PROVIDERS), + arch=ChoicesParser(REMOTE_ARCHITECTURES), ) def document(self, state): # type: (DocumentationState) -> t.Optional[str] @@ -157,6 +180,7 @@ class WindowsRemoteKeyValueParser(KeyValueParser): state.sections[f'target {section_name} (comma separated):'] = '\n'.join([ f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}', + f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}', ]) return f'{{{section_name}}}' @@ -168,6 +192,7 @@ class NetworkRemoteKeyValueParser(KeyValueParser): """Return a dictionary of key names and value parsers.""" return dict( provider=ChoicesParser(REMOTE_PROVIDERS), + arch=ChoicesParser(REMOTE_ARCHITECTURES), collection=AnyParser(), connection=AnyParser(), ) @@ -178,7 +203,8 @@ class NetworkRemoteKeyValueParser(KeyValueParser): state.sections[f'target {section_name} (comma separated):'] = '\n'.join([ f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}', - ' collection={collecton}', + f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}', + ' collection={collection}', ' connection={connection}', ]) diff --git a/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py b/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py index 1aae88216f..d09ab7cc21 100644 --- a/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py +++ b/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py @@ -5,6 +5,7 @@ import typing as t from ...host_configs import ( NativePythonConfig, + PythonConfig, VirtualPythonConfig, ) @@ -18,6 +19,7 @@ from ..argparsing.parsers import ( Parser, ParserError, ParserState, + ParserBoundary, ) @@ -58,7 +60,7 @@ class PythonParser(Parser): The origin host and unknown environments assume all relevant Python versions are available. 
""" def __init__(self, - versions, # type: t.List[str] + versions, # type: t.Sequence[str] *, allow_default, # type: bool allow_venv, # type: bool @@ -85,9 +87,13 @@ class PythonParser(Parser): def parse(self, state): # type: (ParserState) -> t.Any """Parse the input from the given state and return the result.""" + boundary: ParserBoundary + with state.delimit('@/', required=False) as boundary: version = ChoicesParser(self.first_choices).parse(state) + python: PythonConfig + if version == 'venv': with state.delimit('@/', required=False) as boundary: version = ChoicesParser(self.venv_choices).parse(state) @@ -156,7 +162,7 @@ class SshConnectionParser(Parser): setattr(namespace, 'user', user) - with state.delimit(':', required=False) as colon: + with state.delimit(':', required=False) as colon: # type: ParserBoundary host = AnyParser(no_match_message=f'Expected {{host}} from: {self.EXPECTED_FORMAT}').parse(state) setattr(namespace, 'host', host) diff --git a/test/lib/ansible_test/_internal/commands/coverage/__init__.py b/test/lib/ansible_test/_internal/commands/coverage/__init__.py index 50bc82632f..88128c46ea 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/__init__.py +++ b/test/lib/ansible_test/_internal/commands/coverage/__init__.py @@ -95,7 +95,16 @@ def run_coverage(args, host_state, output_file, command, cmd): # type: (Coverag cmd = ['python', '-m', 'coverage.__main__', command, '--rcfile', COVERAGE_CONFIG_PATH] + cmd - intercept_python(args, host_state.controller_profile.python, cmd, env) + stdout, stderr = intercept_python(args, host_state.controller_profile.python, cmd, env, capture=True) + + stdout = (stdout or '').strip() + stderr = (stderr or '').strip() + + if stdout: + display.info(stdout) + + if stderr: + display.warning(stderr) def get_all_coverage_files(): # type: () -> t.List[str] @@ -152,7 +161,7 @@ def enumerate_python_arcs( modules, # type: t.Dict[str, str] collection_search_re, # type: t.Optional[t.Pattern] collection_sub_re, # 
type: t.Optional[t.Pattern] -): # type: (...) -> t.Generator[t.Tuple[str, t.Set[t.Tuple[int, int]]]] +): # type: (...) -> t.Generator[t.Tuple[str, t.Set[t.Tuple[int, int]]], None, None] """Enumerate Python code coverage arcs in the given file.""" if os.path.getsize(path) == 0: display.warning('Empty coverage file: %s' % path, verbosity=2) @@ -193,7 +202,7 @@ def enumerate_powershell_lines( path, # type: str collection_search_re, # type: t.Optional[t.Pattern] collection_sub_re, # type: t.Optional[t.Pattern] -): # type: (...) -> t.Generator[t.Tuple[str, t.Dict[int, int]]] +): # type: (...) -> t.Generator[t.Tuple[str, t.Dict[int, int]], None, None] """Enumerate PowerShell code coverage lines in the given file.""" if os.path.getsize(path) == 0: display.warning('Empty coverage file: %s' % path, verbosity=2) @@ -298,7 +307,7 @@ class PathChecker: def __init__(self, args, collection_search_re=None): # type: (CoverageConfig, t.Optional[t.Pattern]) -> None self.args = args self.collection_search_re = collection_search_re - self.invalid_paths = [] + self.invalid_paths = [] # type: t.List[str] self.invalid_path_chars = 0 def check_path(self, path): # type: (str) -> bool diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py index db169fd7a0..16521bef4f 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py @@ -14,4 +14,4 @@ class CoverageAnalyzeConfig(CoverageConfig): # avoid mixing log messages with file output when using `/dev/stdout` for the output file on commands # this may be worth considering as the default behavior in the future, instead of being dependent on the command or options used - self.info_stderr = True + self.display_stderr = True diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py 
b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py index a39d12c825..267969886e 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py @@ -18,27 +18,22 @@ from .. import ( CoverageAnalyzeConfig, ) -if t.TYPE_CHECKING: - TargetKey = t.TypeVar('TargetKey', int, t.Tuple[int, int]) - NamedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[str]]] - IndexedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[int]]] - Arcs = t.Dict[str, t.Dict[t.Tuple[int, int], t.Set[int]]] - Lines = t.Dict[str, t.Dict[int, t.Set[int]]] - TargetIndexes = t.Dict[str, int] - TargetSetIndexes = t.Dict[t.FrozenSet[int], int] +TargetKey = t.TypeVar('TargetKey', int, t.Tuple[int, int]) +NamedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[str]]] +IndexedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[int]]] +Arcs = t.Dict[str, t.Dict[t.Tuple[int, int], t.Set[int]]] +Lines = t.Dict[str, t.Dict[int, t.Set[int]]] +TargetIndexes = t.Dict[str, int] +TargetSetIndexes = t.Dict[t.FrozenSet[int], int] class CoverageAnalyzeTargetsConfig(CoverageAnalyzeConfig): """Configuration for the `coverage analyze targets` command.""" - def __init__(self, args): # type: (t.Any) -> None - super().__init__(args) - - self.info_stderr = True def make_report(target_indexes, arcs, lines): # type: (TargetIndexes, Arcs, Lines) -> t.Dict[str, t.Any] """Condense target indexes, arcs and lines into a compact report.""" - set_indexes = {} + set_indexes = {} # type: TargetSetIndexes arc_refs = dict((path, dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())) for path, data in arcs.items()) line_refs = dict((path, dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())) for path, data in lines.items()) @@ -95,6 +90,11 @@ def write_report(args, report, path): # type: (CoverageAnalyzeTargetsConfig, t. 
), verbosity=1) +def format_line(value): # type: (int) -> str + """Format line as a string.""" + return str(value) # putting this in a function keeps both pylint and mypy happy + + def format_arc(value): # type: (t.Tuple[int, int]) -> str """Format an arc tuple as a string.""" return '%d:%d' % value diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py index d68edc02b2..1ea9d59eb4 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py @@ -18,13 +18,12 @@ from . import ( write_report, ) -if t.TYPE_CHECKING: - from . import ( - Arcs, - IndexedPoints, - Lines, - TargetIndexes, - ) +from . import ( + Arcs, + IndexedPoints, + Lines, + TargetIndexes, +) class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig): diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py index 6ca6e6d33a..d928342460 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py @@ -19,6 +19,7 @@ from . 
import ( CoverageAnalyzeTargetsConfig, expand_indexes, format_arc, + format_line, read_report, ) @@ -43,7 +44,7 @@ def command_coverage_analyze_targets_expand(args): # type: (CoverageAnalyzeTarg report = dict( arcs=expand_indexes(covered_path_arcs, covered_targets, format_arc), - lines=expand_indexes(covered_path_lines, covered_targets, str), + lines=expand_indexes(covered_path_lines, covered_targets, format_line), ) if not args.explain: diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py index e5d2f50003..e5e0dff774 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py @@ -21,11 +21,10 @@ from . import ( write_report, ) -if t.TYPE_CHECKING: - from . import ( - NamedPoints, - TargetIndexes, - ) +from . import ( + NamedPoints, + TargetIndexes, +) class CoverageAnalyzeTargetsFilterConfig(CoverageAnalyzeTargetsConfig): diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py index 3f9bca74db..54b2516fc7 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py @@ -43,12 +43,11 @@ from . import ( write_report, ) -if t.TYPE_CHECKING: - from . import ( - Arcs, - Lines, - TargetIndexes, - ) +from . 
import ( + Arcs, + Lines, + TargetIndexes, +) class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig): @@ -68,7 +67,7 @@ def command_coverage_analyze_targets_generate(args): # type: (CoverageAnalyzeTa raise Delegate(host_state) root = data_context().content.root - target_indexes = {} + target_indexes = {} # type: TargetIndexes arcs = dict((os.path.relpath(path, root), data) for path, data in analyze_python_coverage(args, host_state, args.input_dir, target_indexes).items()) lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, args.input_dir, target_indexes).items()) report = make_report(target_indexes, arcs, lines) @@ -139,7 +138,7 @@ def analyze_powershell_coverage( def prune_invalid_filenames( args, # type: CoverageAnalyzeTargetsGenerateConfig results, # type: t.Dict[str, t.Any] - collection_search_re=None, # type: t.Optional[str] + collection_search_re=None, # type: t.Optional[t.Pattern] ): # type: (...) -> None """Remove invalid filenames from the given result set.""" path_checker = PathChecker(args, collection_search_re) diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py index 9b6d696dbe..f3cdfe5b95 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py @@ -24,11 +24,10 @@ from . import ( write_report, ) -if t.TYPE_CHECKING: - from . import ( - TargetIndexes, - IndexedPoints, - ) +from . 
import ( + TargetIndexes, + IndexedPoints, +) class CoverageAnalyzeTargetsMissingConfig(CoverageAnalyzeTargetsConfig): @@ -53,7 +52,7 @@ def command_coverage_analyze_targets_missing(args): # type: (CoverageAnalyzeTar from_targets, from_path_arcs, from_path_lines = read_report(args.from_file) to_targets, to_path_arcs, to_path_lines = read_report(args.to_file) - target_indexes = {} + target_indexes = {} # type: TargetIndexes if args.only_gaps: arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists) @@ -74,7 +73,7 @@ def find_gaps( only_exists, # type: bool ): # type: (...) -> IndexedPoints """Find gaps in coverage between the from and to data sets.""" - target_data = {} + target_data = {} # type: IndexedPoints for from_path, from_points in from_data.items(): if only_exists and not os.path.isfile(to_bytes(from_path)): @@ -100,7 +99,7 @@ def find_missing( only_exists, # type: bool ): # type: (...) -> IndexedPoints """Find coverage in from_data not present in to_data (arcs or lines).""" - target_data = {} + target_data = {} # type: IndexedPoints for from_path, from_points in from_data.items(): if only_exists and not os.path.isfile(to_bytes(from_path)): diff --git a/test/lib/ansible_test/_internal/commands/coverage/combine.py b/test/lib/ansible_test/_internal/commands/coverage/combine.py index b240df461e..c93be27090 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/combine.py +++ b/test/lib/ansible_test/_internal/commands/coverage/combine.py @@ -18,11 +18,11 @@ from ...util import ( ANSIBLE_TEST_TOOLS_ROOT, display, ApplicationError, + raw_command, ) from ...util_common import ( ResultType, - run_command, write_json_file, write_json_test_results, ) @@ -189,7 +189,7 @@ def _command_coverage_combine_powershell(args): # type: (CoverageCombineConfig) cmd = ['pwsh', os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'coverage_stub.ps1')] cmd.extend(source_paths) - stubs = json.loads(run_command(args, cmd, capture=True, always=True)[0]) + 
stubs = json.loads(raw_command(cmd, capture=True)[0]) return dict((d['Path'], dict((line, 0) for line in d['Lines'])) for d in stubs) @@ -315,7 +315,6 @@ def get_coverage_group(args, coverage_file): # type: (CoverageCombineConfig, st """Return the name of the coverage group for the specified coverage file, or None if no group was found.""" parts = os.path.basename(coverage_file).split('=', 4) - # noinspection PyTypeChecker if len(parts) != 5 or not parts[4].startswith('coverage.'): return None diff --git a/test/lib/ansible_test/_internal/commands/coverage/xml.py b/test/lib/ansible_test/_internal/commands/coverage/xml.py index ed9603c28f..c498d1c2b2 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/xml.py +++ b/test/lib/ansible_test/_internal/commands/coverage/xml.py @@ -76,7 +76,7 @@ def _generate_powershell_xml(coverage_file): # type: (str) -> Element content_root = data_context().content.root is_ansible = data_context().content.is_ansible - packages = {} + packages = {} # type: t.Dict[str, t.Dict[str, t.Dict[str, int]]] for path, results in coverage_info.items(): filename = os.path.splitext(os.path.basename(path))[0] @@ -131,7 +131,7 @@ def _generate_powershell_xml(coverage_file): # type: (str) -> Element return elem_coverage -def _add_cobertura_package(packages, package_name, package_data): # type: (SubElement, str, t.Dict[str, t.Dict[str, int]]) -> t.Tuple[int, int] +def _add_cobertura_package(packages, package_name, package_data): # type: (Element, str, t.Dict[str, t.Dict[str, int]]) -> t.Tuple[int, int] """Add a package element to the given packages element.""" elem_package = SubElement(packages, 'package') elem_classes = SubElement(elem_package, 'classes') diff --git a/test/lib/ansible_test/_internal/commands/env/__init__.py b/test/lib/ansible_test/_internal/commands/env/__init__.py index c625209c84..41a1d52090 100644 --- a/test/lib/ansible_test/_internal/commands/env/__init__.py +++ b/test/lib/ansible_test/_internal/commands/env/__init__.py @@ 
-17,9 +17,9 @@ from ...io import ( from ...util import ( display, - SubprocessError, get_ansible_version, get_available_python_versions, + ApplicationError, ) from ...util_common import ( @@ -30,8 +30,8 @@ from ...util_common import ( from ...docker_util import ( get_docker_command, - docker_info, - docker_version + get_docker_info, + get_docker_container_id, ) from ...constants import ( @@ -70,11 +70,14 @@ def show_dump_env(args): # type: (EnvConfig) -> None if not args.show and not args.dump: return + container_id = get_docker_container_id() + data = dict( ansible=dict( version=get_ansible_version(), ), docker=get_docker_details(args), + container_id=container_id, environ=os.environ.copy(), location=dict( pwd=os.environ.get('PWD', None), @@ -166,7 +169,7 @@ def show_dict(data, verbose, root_verbosity=0, path=None): # type: (t.Dict[str, display.info(indent + '%s: %s' % (key, value), verbosity=verbosity) -def get_docker_details(args): # type: (EnvConfig) -> t.Dict[str, str] +def get_docker_details(args): # type: (EnvConfig) -> t.Dict[str, t.Any] """Return details about docker.""" docker = get_docker_command() @@ -178,14 +181,12 @@ def get_docker_details(args): # type: (EnvConfig) -> t.Dict[str, str] executable = docker.executable try: - info = docker_info(args) - except SubprocessError as ex: - display.warning('Failed to collect docker info:\n%s' % ex) - - try: - version = docker_version(args) - except SubprocessError as ex: - display.warning('Failed to collect docker version:\n%s' % ex) + docker_info = get_docker_info(args) + except ApplicationError as ex: + display.warning(str(ex)) + else: + info = docker_info.info + version = docker_info.version docker_details = dict( executable=executable, diff --git a/test/lib/ansible_test/_internal/commands/integration/__init__.py b/test/lib/ansible_test/_internal/commands/integration/__init__.py index a9a49aa14b..2ae1e39c9d 100644 --- a/test/lib/ansible_test/_internal/commands/integration/__init__.py +++ 
b/test/lib/ansible_test/_internal/commands/integration/__init__.py @@ -98,6 +98,7 @@ from ...host_configs import ( from ...host_profiles import ( ControllerProfile, + ControllerHostProfile, HostProfile, PosixProfile, SshTargetHostProfile, @@ -134,7 +135,7 @@ def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTa """Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend.""" targets_dict = dict((target.name, target) for target in integration_targets) target_dependencies = analyze_integration_target_dependencies(integration_targets) - dependency_map = {} + dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]] invalid_targets = set() @@ -159,7 +160,7 @@ def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTa def get_files_needed(target_dependencies): # type: (t.List[IntegrationTarget]) -> t.List[str] """Return a list of files needed by the given list of target dependencies.""" - files_needed = [] + files_needed = [] # type: t.List[str] for target_dependency in target_dependencies: files_needed += target_dependency.needs_file @@ -241,7 +242,7 @@ def integration_test_environment( args, # type: IntegrationConfig target, # type: IntegrationTarget inventory_path_src, # type: str -): # type: (...) -> t.ContextManager[IntegrationEnvironment] +): # type: (...) 
-> t.Iterator[IntegrationEnvironment] """Context manager that prepares the integration test environment and cleans it up.""" ansible_config_src = args.get_ansible_config() ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command) @@ -324,7 +325,7 @@ def integration_test_environment( display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2) if not args.explain: - shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True) + shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True) # type: ignore[arg-type] # incorrect type stub omits bytes path support for file_src, file_dst in file_copies: display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2) @@ -344,7 +345,7 @@ def integration_test_config_file( args, # type: IntegrationConfig env_config, # type: CloudEnvironmentConfig integration_dir, # type: str -): # type: (...) -> t.ContextManager[t.Optional[str]] +): # type: (...) -> t.Iterator[t.Optional[str]] """Context manager that provides a config file for integration tests, if needed.""" if not env_config: yield None @@ -361,7 +362,7 @@ def integration_test_config_file( config_file = json.dumps(config_vars, indent=4, sort_keys=True) - with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path: + with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path: # type: str filename = os.path.relpath(path, integration_dir) display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3) @@ -398,8 +399,8 @@ def create_inventory( def command_integration_filtered( args, # type: IntegrationConfig host_state, # type: HostState - targets, # type: t.Tuple[IntegrationTarget] - all_targets, # type: t.Tuple[IntegrationTarget] + targets, # type: t.Tuple[IntegrationTarget, ...] + all_targets, # type: t.Tuple[IntegrationTarget, ...] 
inventory_path, # type: str pre_target=None, # type: t.Optional[t.Callable[[IntegrationTarget], None]] post_target=None, # type: t.Optional[t.Callable[[IntegrationTarget], None]] @@ -413,7 +414,7 @@ def command_integration_filtered( all_targets_dict = dict((target.name, target) for target in all_targets) setup_errors = [] - setup_targets_executed = set() + setup_targets_executed = set() # type: t.Set[str] for target in all_targets: for setup_target in target.setup_once + target.setup_always: @@ -530,6 +531,10 @@ def command_integration_filtered( if not tries: raise + if target.retry_never: + display.warning(f'Skipping retry of test target "{target.name}" since it has been excluded from retries.') + raise + display.warning('Retrying test target "%s" with maximum verbosity.' % target.name) display.verbosity = args.verbosity = 6 @@ -538,7 +543,7 @@ def command_integration_filtered( failed.append(target) if args.continue_on_error: - display.error(ex) + display.error(str(ex)) continue display.notice('To resume at this test target, use the option: --start-at %s' % target.name) @@ -597,7 +602,7 @@ def command_integration_script( module_defaults=env_config.module_defaults, ), indent=4, sort_keys=True), verbosity=3) - with integration_test_environment(args, target, inventory_path) as test_env: + with integration_test_environment(args, target, inventory_path) as test_env: # type: IntegrationEnvironment cmd = ['./%s' % os.path.basename(target.script_path)] if args.verbosity: @@ -614,12 +619,12 @@ def command_integration_script( if env_config and env_config.env_vars: env.update(env_config.env_vars) - with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path: + with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path: # type: t.Optional[str] if config_path: cmd += ['-e', '@%s' % config_path] env.update(coverage_manager.get_environment(target.name, target.aliases)) - cover_python(args, 
host_state.controller_profile.python, cmd, target.name, env, cwd=cwd) + cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd, capture=False) def command_integration_role( @@ -673,7 +678,7 @@ def command_integration_role( module_defaults=env_config.module_defaults, ), indent=4, sort_keys=True), verbosity=3) - with integration_test_environment(args, target, inventory_path) as test_env: + with integration_test_environment(args, target, inventory_path) as test_env: # type: IntegrationEnvironment if os.path.exists(test_env.vars_file): vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir)) @@ -738,14 +743,14 @@ def command_integration_role( env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir env.update(coverage_manager.get_environment(target.name, target.aliases)) - cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd) + cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd, capture=False) def run_setup_targets( args, # type: IntegrationConfig host_state, # type: HostState test_dir, # type: str - target_names, # type: t.List[str] + target_names, # type: t.Sequence[str] targets_dict, # type: t.Dict[str, IntegrationTarget] targets_executed, # type: t.Set[str] inventory_path, # type: str @@ -956,13 +961,10 @@ def command_integration_filter(args, # type: TIntegrationConfig return host_state, internal_targets -def requirements(args, host_state): # type: (IntegrationConfig, HostState) -> None - """Install requirements.""" - target_profile = host_state.target_profiles[0] - - configure_pypi_proxy(args, host_state.controller_profile) # integration, windows-integration, network-integration - - if isinstance(target_profile, PosixProfile) and not isinstance(target_profile, ControllerProfile): - configure_pypi_proxy(args, target_profile) # integration - - install_requirements(args, host_state.controller_profile.python, ansible=True, command=True) # 
integration, windows-integration, network-integration +def requirements(host_profile: HostProfile) -> None: + """Install requirements after bootstrapping and delegation.""" + if isinstance(host_profile, ControllerHostProfile) and host_profile.controller: + configure_pypi_proxy(host_profile.args, host_profile) # integration, windows-integration, network-integration + install_requirements(host_profile.args, host_profile.python, ansible=True, command=True) # integration, windows-integration, network-integration + elif isinstance(host_profile, PosixProfile) and not isinstance(host_profile, ControllerProfile): + configure_pypi_proxy(host_profile.args, host_profile) # integration diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py b/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py index 70f8afafe6..5afde048b3 100644 --- a/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py +++ b/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py @@ -59,8 +59,8 @@ def get_cloud_plugins(): # type: () -> t.Tuple[t.Dict[str, t.Type[CloudProvider """Import cloud plugins and load them into the plugin dictionaries.""" import_plugins('commands/integration/cloud') - providers = {} - environments = {} + providers = {} # type: t.Dict[str, t.Type[CloudProvider]] + environments = {} # type: t.Dict[str, t.Type[CloudEnvironment]] load_plugins(CloudProvider, providers) load_plugins(CloudEnvironment, environments) @@ -134,7 +134,7 @@ def cloud_filter(args, targets): # type: (IntegrationConfig, t.Tuple[Integratio if args.metadata.cloud_config is not None: return [] # cloud filter already performed prior to delegation - exclude = [] + exclude = [] # type: t.List[str] for provider in get_cloud_providers(args, targets): provider.filter(targets, exclude) @@ -206,7 +206,7 @@ class CloudBase(metaclass=abc.ABCMeta): @property def setup_executed(self): # type: () -> bool """True if setup has been executed, otherwise 
False.""" - return self._get_cloud_config(self._SETUP_EXECUTED, False) + return t.cast(bool, self._get_cloud_config(self._SETUP_EXECUTED, False)) @setup_executed.setter def setup_executed(self, value): # type: (bool) -> None @@ -216,7 +216,7 @@ class CloudBase(metaclass=abc.ABCMeta): @property def config_path(self): # type: () -> str """Path to the configuration file.""" - return os.path.join(data_context().content.root, self._get_cloud_config(self._CONFIG_PATH)) + return os.path.join(data_context().content.root, str(self._get_cloud_config(self._CONFIG_PATH))) @config_path.setter def config_path(self, value): # type: (str) -> None @@ -226,7 +226,7 @@ class CloudBase(metaclass=abc.ABCMeta): @property def resource_prefix(self): # type: () -> str """Resource prefix.""" - return self._get_cloud_config(self._RESOURCE_PREFIX) + return str(self._get_cloud_config(self._RESOURCE_PREFIX)) @resource_prefix.setter def resource_prefix(self, value): # type: (str) -> None @@ -236,7 +236,7 @@ class CloudBase(metaclass=abc.ABCMeta): @property def managed(self): # type: () -> bool """True if resources are managed by ansible-test, otherwise False.""" - return self._get_cloud_config(self._MANAGED) + return t.cast(bool, self._get_cloud_config(self._MANAGED)) @managed.setter def managed(self, value): # type: (bool) -> None diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py b/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py index 94e60667c3..48aef72751 100644 --- a/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py +++ b/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py @@ -21,6 +21,7 @@ from ....target import ( from ....core_ci import ( AnsibleCoreCI, + CloudResource, ) from ....host_configs import ( @@ -91,7 +92,7 @@ class AwsCloudProvider(CloudProvider): def _create_ansible_core_ci(self): # type: () -> AnsibleCoreCI """Return an AWS instance of AnsibleCoreCI.""" - return AnsibleCoreCI(self.args, 'aws', 'aws', 
'aws', persist=False) + return AnsibleCoreCI(self.args, CloudResource(platform='aws')) class AwsCloudEnvironment(CloudEnvironment): @@ -104,9 +105,8 @@ class AwsCloudEnvironment(CloudEnvironment): ansible_vars = dict( resource_prefix=self.resource_prefix, tiny_prefix=uuid.uuid4().hex[0:12] - ) + ) # type: t.Dict[str, t.Any] - # noinspection PyTypeChecker ansible_vars.update(dict(parser.items('default'))) display.sensitive.add(ansible_vars.get('aws_secret_key')) diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py b/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py index 002fa581db..a6c7156aa3 100644 --- a/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py +++ b/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py @@ -29,6 +29,7 @@ from ....http import ( from ....core_ci import ( AnsibleCoreCI, + CloudResource, ) from . import ( @@ -45,7 +46,7 @@ class AzureCloudProvider(CloudProvider): def __init__(self, args): # type: (IntegrationConfig) -> None super().__init__(args) - self.aci = None + self.aci = None # type: t.Optional[AnsibleCoreCI] self.uses_config = True @@ -133,7 +134,7 @@ class AzureCloudProvider(CloudProvider): def _create_ansible_core_ci(self): # type: () -> AnsibleCoreCI """Return an Azure instance of AnsibleCoreCI.""" - return AnsibleCoreCI(self.args, 'azure', 'azure', 'azure', persist=False) + return AnsibleCoreCI(self.args, CloudResource(platform='azure')) class AzureCloudEnvironment(CloudEnvironment): diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py b/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py index f20a7d887e..8ffcabfb32 100644 --- a/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py +++ b/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py @@ -106,7 +106,7 @@ class CsCloudProvider(CloudProvider): # apply work-around for OverlayFS issue # 
https://github.com/docker/for-linux/issues/72#issuecomment-319904698 - docker_exec(self.args, self.DOCKER_SIMULATOR_NAME, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';']) + docker_exec(self.args, self.DOCKER_SIMULATOR_NAME, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'], capture=True) if self.args.explain: values = dict( diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py b/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py index b4ca48f75f..86a38fef24 100644 --- a/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py +++ b/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py @@ -85,8 +85,8 @@ class ForemanEnvironment(CloudEnvironment): def get_environment_config(self): # type: () -> CloudEnvironmentConfig """Return environment configuration for use in the test environment after delegation.""" env_vars = dict( - FOREMAN_HOST=self._get_cloud_config('FOREMAN_HOST'), - FOREMAN_PORT=self._get_cloud_config('FOREMAN_PORT'), + FOREMAN_HOST=str(self._get_cloud_config('FOREMAN_HOST')), + FOREMAN_PORT=str(self._get_cloud_config('FOREMAN_PORT')), ) return CloudEnvironmentConfig( diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py b/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py index de58cbf5bc..302a291915 100644 --- a/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py +++ b/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py @@ -145,8 +145,8 @@ class GalaxyEnvironment(CloudEnvironment): """Galaxy environment plugin. 
Updates integration test environment after delegation.""" def get_environment_config(self): # type: () -> CloudEnvironmentConfig """Return environment configuration for use in the test environment after delegation.""" - pulp_user = self._get_cloud_config('PULP_USER') - pulp_password = self._get_cloud_config('PULP_PASSWORD') + pulp_user = str(self._get_cloud_config('PULP_USER')) + pulp_password = str(self._get_cloud_config('PULP_PASSWORD')) pulp_host = self._get_cloud_config('PULP_HOST') galaxy_port = self._get_cloud_config('GALAXY_PORT') pulp_port = self._get_cloud_config('PULP_PORT') diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py b/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py index 28b07e7230..6912aff36d 100644 --- a/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py +++ b/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py @@ -18,6 +18,7 @@ from ....target import ( from ....core_ci import ( AnsibleCoreCI, + CloudResource, ) from . 
import ( @@ -78,7 +79,7 @@ class HcloudCloudProvider(CloudProvider): def _create_ansible_core_ci(self): # type: () -> AnsibleCoreCI """Return a Heztner instance of AnsibleCoreCI.""" - return AnsibleCoreCI(self.args, 'hetzner', 'hetzner', 'hetzner', persist=False) + return AnsibleCoreCI(self.args, CloudResource(platform='hetzner')) class HcloudCloudEnvironment(CloudEnvironment): diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py b/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py index 2d8217e99c..00c62b76e6 100644 --- a/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py +++ b/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py @@ -87,6 +87,6 @@ class HttptesterEnvironment(CloudEnvironment): return CloudEnvironmentConfig( env_vars=dict( HTTPTESTER='1', # backwards compatibility for tests intended to work with or without HTTP Tester - KRB5_PASSWORD=self._get_cloud_config(KRB5_PASSWORD_ENV), + KRB5_PASSWORD=str(self._get_cloud_config(KRB5_PASSWORD_ENV)), ) ) diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py b/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py index fb69b9b212..2093b461c8 100644 --- a/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py +++ b/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py @@ -107,14 +107,14 @@ class VcenterEnvironment(CloudEnvironment): ansible_vars.update(dict(parser.items('DEFAULT', raw=True))) except KeyError: # govcsim env_vars = dict( - VCENTER_HOSTNAME=self._get_cloud_config('vcenter_hostname'), + VCENTER_HOSTNAME=str(self._get_cloud_config('vcenter_hostname')), VCENTER_USERNAME='user', VCENTER_PASSWORD='pass', ) ansible_vars = dict( - vcsim=self._get_cloud_config('vcenter_hostname'), - vcenter_hostname=self._get_cloud_config('vcenter_hostname'), + vcsim=str(self._get_cloud_config('vcenter_hostname')), + 
vcenter_hostname=str(self._get_cloud_config('vcenter_hostname')), vcenter_username='user', vcenter_password='pass', ) diff --git a/test/lib/ansible_test/_internal/commands/integration/coverage.py b/test/lib/ansible_test/_internal/commands/integration/coverage.py index c36b440366..dd885c30f9 100644 --- a/test/lib/ansible_test/_internal/commands/integration/coverage.py +++ b/test/lib/ansible_test/_internal/commands/integration/coverage.py @@ -33,6 +33,7 @@ from ...util import ( get_type_map, remove_tree, sanitize_host_name, + verified_chmod, ) from ...util_common import ( @@ -118,7 +119,7 @@ class CoverageHandler(t.Generic[THostConfig], metaclass=abc.ABCMeta): def run_playbook(self, playbook, variables): # type: (str, t.Dict[str, str]) -> None """Run the specified playbook using the current inventory.""" self.create_inventory() - run_playbook(self.args, self.inventory_path, playbook, variables) + run_playbook(self.args, self.inventory_path, playbook, capture=False, variables=variables) class PosixCoverageHandler(CoverageHandler[PosixConfig]): @@ -166,9 +167,9 @@ class PosixCoverageHandler(CoverageHandler[PosixConfig]): write_text_file(coverage_config_path, coverage_config, create_directories=True) - os.chmod(coverage_config_path, MODE_FILE) + verified_chmod(coverage_config_path, MODE_FILE) os.mkdir(coverage_output_path) - os.chmod(coverage_output_path, MODE_DIRECTORY_WRITE) + verified_chmod(coverage_output_path, MODE_DIRECTORY_WRITE) def setup_target(self): """Perform setup for code coverage on the target.""" @@ -271,7 +272,7 @@ class WindowsCoverageHandler(CoverageHandler[WindowsConfig]): @property def is_active(self): # type: () -> bool """True if the handler should be used, otherwise False.""" - return self.profiles and not self.args.coverage_check + return bool(self.profiles) and not self.args.coverage_check def setup(self): # type: () -> None """Perform setup for code coverage.""" diff --git a/test/lib/ansible_test/_internal/commands/integration/filters.py 
b/test/lib/ansible_test/_internal/commands/integration/filters.py index 0396ce9231..63c7c6b5b1 100644 --- a/test/lib/ansible_test/_internal/commands/integration/filters.py +++ b/test/lib/ansible_test/_internal/commands/integration/filters.py @@ -10,6 +10,7 @@ from ...config import ( from ...util import ( cache, + detect_architecture, display, get_type_map, ) @@ -108,19 +109,19 @@ class TargetFilter(t.Generic[THostConfig], metaclass=abc.ABCMeta): if not self.allow_destructive and not self.config.is_managed: override_destructive = set(target for target in self.include_targets if target.startswith('destructive/')) - override = [target.name for target in targets if override_destructive & set(target.skips)] + override = [target.name for target in targets if override_destructive & set(target.aliases)] self.skip('destructive', 'which require --allow-destructive or prefixing with "destructive/" to run on unmanaged hosts', targets, exclude, override) if not self.args.allow_disabled: override_disabled = set(target for target in self.args.include if target.startswith('disabled/')) - override = [target.name for target in targets if override_disabled & set(target.skips)] + override = [target.name for target in targets if override_disabled & set(target.aliases)] self.skip('disabled', 'which require --allow-disabled or prefixing with "disabled/"', targets, exclude, override) if not self.args.allow_unsupported: override_unsupported = set(target for target in self.args.include if target.startswith('unsupported/')) - override = [target.name for target in targets if override_unsupported & set(target.skips)] + override = [target.name for target in targets if override_unsupported & set(target.aliases)] self.skip('unsupported', 'which require --allow-unsupported or prefixing with "unsupported/"', targets, exclude, override) @@ -130,7 +131,7 @@ class TargetFilter(t.Generic[THostConfig], metaclass=abc.ABCMeta): if self.args.allow_unstable_changed: override_unstable |= 
set(self.args.metadata.change_description.focused_targets or []) - override = [target.name for target in targets if override_unstable & set(target.skips)] + override = [target.name for target in targets if override_unstable & set(target.aliases)] self.skip('unstable', 'which require --allow-unstable or prefixing with "unstable/"', targets, exclude, override) @@ -223,6 +224,14 @@ class NetworkInventoryTargetFilter(TargetFilter[NetworkInventoryConfig]): class OriginTargetFilter(PosixTargetFilter[OriginConfig]): """Target filter for localhost.""" + def filter_targets(self, targets, exclude): # type: (t.List[IntegrationTarget], t.Set[str]) -> None + """Filter the list of targets, adding any which this host profile cannot support to the provided exclude list.""" + super().filter_targets(targets, exclude) + + arch = detect_architecture(self.config.python.path) + + if arch: + self.skip(f'skip/{arch}', f'which are not supported by {arch}', targets, exclude) @cache @@ -247,10 +256,7 @@ def get_target_filter(args, configs, controller): # type: (IntegrationConfig, t def get_remote_skip_aliases(config): # type: (RemoteConfig) -> t.Dict[str, str] """Return a dictionary of skip aliases and the reason why they apply.""" - if isinstance(config, PosixRemoteConfig): - return get_platform_skip_aliases(config.platform, config.version, config.arch) - - return get_platform_skip_aliases(config.platform, config.version, None) + return get_platform_skip_aliases(config.platform, config.version, config.arch) def get_platform_skip_aliases(platform, version, arch): # type: (str, str, t.Optional[str]) -> t.Dict[str, str] diff --git a/test/lib/ansible_test/_internal/commands/sanity/__init__.py b/test/lib/ansible_test/_internal/commands/sanity/__init__.py index 8c1340f2fc..c5008193de 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/__init__.py +++ b/test/lib/ansible_test/_internal/commands/sanity/__init__.py @@ -142,7 +142,7 @@ def command_sanity(args): # type: (SanityConfig) -> None 
if not targets.include: raise AllTargetsSkipped() - tests = sanity_get_tests() + tests = list(sanity_get_tests()) if args.test: disabled = [] @@ -157,6 +157,8 @@ def command_sanity(args): # type: (SanityConfig) -> None targets_use_pypi = any(isinstance(test, SanityMultipleVersion) and test.needs_pypi for test in tests) and not args.list_tests host_state = prepare_profiles(args, targets_use_pypi=targets_use_pypi) # sanity + get_content_config(args) # make sure content config has been parsed prior to delegation + if args.delegate: raise Delegate(host_state=host_state, require=changes, exclude=args.exclude) @@ -170,9 +172,11 @@ def command_sanity(args): # type: (SanityConfig) -> None total = 0 failed = [] + result: t.Optional[TestResult] + for test in tests: if args.list_tests: - display.info(test.name) + print(test.name) # display goes to stderr, this should be on stdout continue for version in SUPPORTED_PYTHON_VERSIONS: @@ -201,19 +205,19 @@ def command_sanity(args): # type: (SanityConfig) -> None else: raise Exception('Unsupported test type: %s' % type(test)) - all_targets = targets.targets + all_targets = list(targets.targets) if test.all_targets: - usable_targets = targets.targets + usable_targets = list(targets.targets) elif test.no_targets: - usable_targets = tuple() + usable_targets = [] else: - usable_targets = targets.include + usable_targets = list(targets.include) all_targets = SanityTargets.filter_and_inject_targets(test, all_targets) usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets) - usable_targets = sorted(test.filter_targets_by_version(list(usable_targets), version)) + usable_targets = sorted(test.filter_targets_by_version(args, list(usable_targets), version)) usable_targets = settings.filter_skipped_targets(usable_targets) sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets)) @@ -355,12 +359,12 @@ class SanityIgnoreParser: for python_version in test.supported_python_versions: test_name = '%s-%s' % 
(test.name, python_version) - paths_by_test[test_name] = set(target.path for target in test.filter_targets_by_version(test_targets, python_version)) + paths_by_test[test_name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, python_version)) tests_by_name[test_name] = test else: unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS)) - paths_by_test[test.name] = set(target.path for target in test.filter_targets_by_version(test_targets, '')) + paths_by_test[test.name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, '')) tests_by_name[test.name] = test for line_no, line in enumerate(lines, start=1): @@ -503,12 +507,15 @@ class SanityIgnoreParser: def load(args): # type: (SanityConfig) -> SanityIgnoreParser """Return the current SanityIgnore instance, initializing it if needed.""" try: - return SanityIgnoreParser.instance + return SanityIgnoreParser.instance # type: ignore[attr-defined] except AttributeError: pass - SanityIgnoreParser.instance = SanityIgnoreParser(args) - return SanityIgnoreParser.instance + instance = SanityIgnoreParser(args) + + SanityIgnoreParser.instance = instance # type: ignore[attr-defined] + + return instance class SanityIgnoreProcessor: @@ -571,7 +578,7 @@ class SanityIgnoreProcessor: def get_errors(self, paths): # type: (t.List[str]) -> t.List[SanityMessage] """Return error messages related to issues with the file.""" - messages = [] + messages = [] # type: t.List[SanityMessage] # unused errors @@ -621,7 +628,7 @@ class SanityFailure(TestFailure): self, test, # type: str python_version=None, # type: t.Optional[str] - messages=None, # type: t.Optional[t.List[SanityMessage]] + messages=None, # type: t.Optional[t.Sequence[SanityMessage]] summary=None, # type: t.Optional[str] ): # type: (...) 
-> None super().__init__(COMMAND, test, python_version, messages, summary) @@ -633,7 +640,7 @@ class SanityMessage(TestMessage): class SanityTargets: """Sanity test target information.""" - def __init__(self, targets, include): # type: (t.Tuple[TestTarget], t.Tuple[TestTarget]) -> None + def __init__(self, targets, include): # type: (t.Tuple[TestTarget, ...], t.Tuple[TestTarget, ...]) -> None self.targets = targets self.include = include @@ -671,11 +678,13 @@ class SanityTargets: def get_targets(): # type: () -> t.Tuple[TestTarget, ...] """Return a tuple of sanity test targets. Uses a cached version when available.""" try: - return SanityTargets.get_targets.targets + return SanityTargets.get_targets.targets # type: ignore[attr-defined] except AttributeError: - SanityTargets.get_targets.targets = tuple(sorted(walk_sanity_targets())) + targets = tuple(sorted(walk_sanity_targets())) + + SanityTargets.get_targets.targets = targets # type: ignore[attr-defined] - return SanityTargets.get_targets.targets + return targets class SanityTest(metaclass=abc.ABCMeta): @@ -695,7 +704,7 @@ class SanityTest(metaclass=abc.ABCMeta): # Because these errors can be unpredictable they behave differently than normal error codes: # * They are not reported by default. The `--enable-optional-errors` option must be used to display these errors. # * They cannot be ignored. This is done to maintain the integrity of the ignore system. - self.optional_error_codes = set() + self.optional_error_codes = set() # type: t.Set[str] @property def error_code(self): # type: () -> t.Optional[str] @@ -749,7 +758,7 @@ class SanityTest(metaclass=abc.ABCMeta): raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' 
% self.name) - def filter_targets_by_version(self, targets, python_version): # type: (t.List[TestTarget], str) -> t.List[TestTarget] + def filter_targets_by_version(self, args, targets, python_version): # type: (SanityConfig, t.List[TestTarget], str) -> t.List[TestTarget] """Return the given list of test targets, filtered to include only those relevant for the test, taking into account the Python version.""" del python_version # python_version is not used here, but derived classes may make use of it @@ -757,7 +766,7 @@ class SanityTest(metaclass=abc.ABCMeta): if self.py2_compat: # This sanity test is a Python 2.x compatibility test. - content_config = get_content_config() + content_config = get_content_config(args) if content_config.py2_support: # This collection supports Python 2.x. @@ -938,6 +947,7 @@ class SanityCodeSmellTest(SanitySingleVersion): cmd = [python.path, self.path] env = ansible_environment(args, color=False) + env.update(PYTHONUTF8='1') # force all code-smell sanity tests to run with Python UTF-8 Mode enabled pattern = None data = None @@ -952,7 +962,7 @@ class SanityCodeSmellTest(SanitySingleVersion): elif self.output == 'path-message': pattern = '^(?P<path>[^:]*): (?P<message>.*)$' else: - pattern = ApplicationError('Unsupported output type: %s' % self.output) + raise ApplicationError('Unsupported output type: %s' % self.output) if not self.no_targets: data = '\n'.join(paths) @@ -1041,15 +1051,15 @@ class SanityMultipleVersion(SanityTest, metaclass=abc.ABCMeta): """A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" return SUPPORTED_PYTHON_VERSIONS - def filter_targets_by_version(self, targets, python_version): # type: (t.List[TestTarget], str) -> t.List[TestTarget] + def filter_targets_by_version(self, args, targets, python_version): # type: (SanityConfig, t.List[TestTarget], str) -> t.List[TestTarget] """Return the given list of test targets, filtered to include only those relevant for the 
test, taking into account the Python version.""" if not python_version: raise Exception('python_version is required to filter multi-version tests') - targets = super().filter_targets_by_version(targets, python_version) + targets = super().filter_targets_by_version(args, targets, python_version) if python_version in REMOTE_ONLY_PYTHON_VERSIONS: - content_config = get_content_config() + content_config = get_content_config(args) if python_version not in content_config.modules.python_versions: # when a remote-only python version is not supported there are no paths to test diff --git a/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py b/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py index 82d9f75133..f542a17186 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py +++ b/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py @@ -11,6 +11,7 @@ from . import ( SanityFailure, SanitySuccess, SanityTargets, + SanityMessage, ) from ...test import ( @@ -77,8 +78,8 @@ class AnsibleDocTest(SanitySingleVersion): paths = [target.path for target in targets.include] - doc_targets = collections.defaultdict(list) - target_paths = collections.defaultdict(dict) + doc_targets = collections.defaultdict(list) # type: t.Dict[str, t.List[str]] + target_paths = collections.defaultdict(dict) # type: t.Dict[str, t.Dict[str, str]] remap_types = dict( modules='module', @@ -97,7 +98,7 @@ class AnsibleDocTest(SanitySingleVersion): target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path env = ansible_environment(args, color=False) - error_messages = [] + error_messages = [] # type: t.List[SanityMessage] for doc_type in sorted(doc_targets): for format_option in [None, '--json']: diff --git a/test/lib/ansible_test/_internal/commands/sanity/ignores.py b/test/lib/ansible_test/_internal/commands/sanity/ignores.py index 9a39955ac5..867243adfe 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/ignores.py 
+++ b/test/lib/ansible_test/_internal/commands/sanity/ignores.py @@ -2,6 +2,7 @@ from __future__ import annotations import os +import typing as t from . import ( SanityFailure, @@ -38,7 +39,7 @@ class IgnoresTest(SanityVersionNeutral): def test(self, args, targets): # type: (SanityConfig, SanityTargets) -> TestResult sanity_ignore = SanityIgnoreParser.load(args) - messages = [] + messages = [] # type: t.List[SanityMessage] # parse errors diff --git a/test/lib/ansible_test/_internal/commands/sanity/import.py b/test/lib/ansible_test/_internal/commands/sanity/import.py index aa0239d522..28619e6f00 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/import.py +++ b/test/lib/ansible_test/_internal/commands/sanity/import.py @@ -111,7 +111,7 @@ class ImportTest(SanityMultipleVersion): try: install_requirements(args, python, virtualenv=True, controller=False) # sanity (import) except PipUnavailableError as ex: - display.warning(ex) + display.warning(str(ex)) temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import') diff --git a/test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py b/test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py index 3ff9895988..bc96b684f8 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py +++ b/test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py @@ -1,6 +1,7 @@ """Sanity test to check integration test aliases.""" from __future__ import annotations +import dataclasses import json import textwrap import os @@ -128,7 +129,7 @@ class IntegrationAliasesTest(SanitySingleVersion): def ci_test_groups(self): # type: () -> t.Dict[str, t.List[int]] """Return a dictionary of CI test names and their group(s).""" if not self._ci_test_groups: - test_groups = {} + test_groups = {} # type: t.Dict[str, t.Set[int]] for stage in self._ci_config['stages']: for job in stage['jobs']: @@ -210,7 +211,7 @@ class IntegrationAliasesTest(SanitySingleVersion): 
path=self.CI_YML, )]) - results = dict( + results = Results( comments=[], labels={}, ) @@ -218,7 +219,7 @@ class IntegrationAliasesTest(SanitySingleVersion): self.load_ci_config(python) self.check_changes(args, results) - write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results) + write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results.__dict__) messages = [] @@ -318,6 +319,9 @@ class IntegrationAliasesTest(SanitySingleVersion): messages = [] for path in unassigned_paths: + if path == 'test/integration/targets/ansible-test-container': + continue # special test target which uses group 6 -- nothing else should be in that group + messages.append(SanityMessage(unassigned_message, '%s/aliases' % path)) for path in conflicting_paths: @@ -325,8 +329,8 @@ class IntegrationAliasesTest(SanitySingleVersion): return messages - def check_changes(self, args, results): # type: (SanityConfig, t.Dict[str, t.Any]) -> None - """Check changes and store results in the provided results dictionary.""" + def check_changes(self, args, results): # type: (SanityConfig, Results) -> None + """Check changes and store results in the provided result dictionary.""" integration_targets = list(walk_integration_targets()) module_targets = list(walk_module_targets()) @@ -370,8 +374,8 @@ class IntegrationAliasesTest(SanitySingleVersion): unsupported_tests=bool(unsupported_targets), ) - results['comments'] += comments - results['labels'].update(labels) + results.comments += comments + results.labels.update(labels) def format_comment(self, template, targets): # type: (str, t.List[str]) -> t.Optional[str] """Format and return a comment based on the given template and targets, or None if there are no targets.""" @@ -388,3 +392,10 @@ class IntegrationAliasesTest(SanitySingleVersion): message = textwrap.dedent(template).strip().format(**data) return message + + +@dataclasses.dataclass +class Results: + """Check results.""" + comments: t.List[str] + labels: t.Dict[str, bool] diff 
--git a/test/lib/ansible_test/_internal/commands/sanity/pep8.py b/test/lib/ansible_test/_internal/commands/sanity/pep8.py index 71241c913f..2610e730d9 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/pep8.py +++ b/test/lib/ansible_test/_internal/commands/sanity/pep8.py @@ -92,7 +92,7 @@ class Pep8Test(SanitySingleVersion): else: results = [] - results = [SanityMessage( + messages = [SanityMessage( message=r['message'], path=r['path'], line=int(r['line']), @@ -101,7 +101,7 @@ class Pep8Test(SanitySingleVersion): code=r['code'], ) for r in results] - errors = settings.process_errors(results, paths) + errors = settings.process_errors(messages, paths) if errors: return SanityFailure(self.name, messages=errors) diff --git a/test/lib/ansible_test/_internal/commands/sanity/pylint.py b/test/lib/ansible_test/_internal/commands/sanity/pylint.py index a4322f0308..eafc5d5e0b 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/pylint.py +++ b/test/lib/ansible_test/_internal/commands/sanity/pylint.py @@ -142,7 +142,7 @@ class PylintTest(SanitySingleVersion): if data_context().content.collection: try: - collection_detail = get_collection_detail(args, python) + collection_detail = get_collection_detail(python) if not collection_detail.version: display.warning('Skipping pylint collection version checks since no collection version was found.') diff --git a/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py b/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py index 0eccc01f9c..49a025c99d 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py +++ b/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py @@ -91,7 +91,7 @@ class ValidateModulesTest(SanitySingleVersion): cmd.extend(['--collection', data_context().content.collection.directory]) try: - collection_detail = get_collection_detail(args, python) + collection_detail = get_collection_detail(python) if collection_detail.version: 
cmd.extend(['--collection-version', collection_detail.version]) diff --git a/test/lib/ansible_test/_internal/commands/shell/__init__.py b/test/lib/ansible_test/_internal/commands/shell/__init__.py index 7364819e0c..099734df59 100644 --- a/test/lib/ansible_test/_internal/commands/shell/__init__.py +++ b/test/lib/ansible_test/_internal/commands/shell/__init__.py @@ -2,11 +2,15 @@ from __future__ import annotations import os +import sys import typing as t from ...util import ( ApplicationError, + OutputStream, display, + SubprocessError, + HostConnectionError, ) from ...config import ( @@ -18,6 +22,7 @@ from ...executor import ( ) from ...connections import ( + Connection, LocalConnection, SshConnection, ) @@ -37,12 +42,20 @@ from ...host_configs import ( OriginConfig, ) +from ...inventory import ( + create_controller_inventory, + create_posix_inventory, +) + def command_shell(args): # type: (ShellConfig) -> None """Entry point for the `shell` command.""" if args.raw and isinstance(args.targets[0], ControllerConfig): raise ApplicationError('The --raw option has no effect on the controller.') + if not args.export and not args.cmd and not sys.stdin.isatty(): + raise ApplicationError('Standard input must be a TTY to launch a shell.') + host_state = prepare_profiles(args, skip_setup=args.raw) # shell if args.delegate: @@ -55,13 +68,31 @@ def command_shell(args): # type: (ShellConfig) -> None if isinstance(target_profile, ControllerProfile): # run the shell locally unless a target was requested - con = LocalConnection(args) + con = LocalConnection(args) # type: Connection + + if args.export: + display.info('Configuring controller inventory.', verbosity=1) + create_controller_inventory(args, args.export, host_state.controller_profile) else: # a target was requested, connect to it over SSH con = target_profile.get_controller_target_connections()[0] + if args.export: + display.info('Configuring target inventory.', verbosity=1) + create_posix_inventory(args, args.export, 
host_state.target_profiles, True) + + if args.export: + return + + if args.cmd: + # Running a command is assumed to be non-interactive. Only a shell (no command) is interactive. + # If we want to support interactive commands in the future, we'll need an `--interactive` command line option. + # Command stderr output is allowed to mix with our own output, which is all sent to stderr. + con.run(args.cmd, capture=False, interactive=False, output_stream=OutputStream.ORIGINAL) + return + if isinstance(con, SshConnection) and args.raw: - cmd = [] + cmd = [] # type: t.List[str] elif isinstance(target_profile, PosixProfile): cmd = [] @@ -86,4 +117,19 @@ def command_shell(args): # type: (ShellConfig) -> None else: cmd = [] - con.run(cmd) + try: + con.run(cmd, capture=False, interactive=True) + except SubprocessError as ex: + if isinstance(con, SshConnection) and ex.status == 255: + # 255 indicates SSH itself failed, rather than a command run on the remote host. + # In this case, report a host connection error so additional troubleshooting output is provided. 
+ if not args.delegate and not args.host_path: + def callback() -> None: + """Callback to run during error display.""" + target_profile.on_target_failure() # when the controller is not delegated, report failures immediately + else: + callback = None + + raise HostConnectionError(f'SSH shell connection failed for host {target_profile.config}: {ex}', callback) from ex + + raise diff --git a/test/lib/ansible_test/_internal/commands/units/__init__.py b/test/lib/ansible_test/_internal/commands/units/__init__.py index 995f715937..1ad0e2f6c4 100644 --- a/test/lib/ansible_test/_internal/commands/units/__init__.py +++ b/test/lib/ansible_test/_internal/commands/units/__init__.py @@ -103,7 +103,7 @@ def command_units(args): # type: (UnitsConfig) -> None paths = [target.path for target in include] - content_config = get_content_config() + content_config = get_content_config(args) supported_remote_python_versions = content_config.modules.python_versions if content_config.modules.controller_only: @@ -295,7 +295,7 @@ def command_units(args): # type: (UnitsConfig) -> None display.info('Unit test %s with Python %s' % (test_context, python.version)) try: - cover_python(args, python, cmd, test_context, env) + cover_python(args, python, cmd, test_context, env, capture=False) except SubprocessError as ex: # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case if ex.status != 5: @@ -311,9 +311,9 @@ def get_units_ansible_python_path(args, test_context): # type: (UnitsConfig, st return get_ansible_python_path(args) try: - cache = get_units_ansible_python_path.cache + cache = get_units_ansible_python_path.cache # type: ignore[attr-defined] except AttributeError: - cache = get_units_ansible_python_path.cache = {} + cache = get_units_ansible_python_path.cache = {} # type: ignore[attr-defined] python_path = cache.get(test_context) diff --git a/test/lib/ansible_test/_internal/compat/packaging.py b/test/lib/ansible_test/_internal/compat/packaging.py 
index a38e1abc2b..44c2bdbbd6 100644 --- a/test/lib/ansible_test/_internal/compat/packaging.py +++ b/test/lib/ansible_test/_internal/compat/packaging.py @@ -1,14 +1,16 @@ """Packaging compatibility.""" from __future__ import annotations +import typing as t + try: from packaging import ( specifiers, version, ) - SpecifierSet = specifiers.SpecifierSet - Version = version.Version + SpecifierSet = specifiers.SpecifierSet # type: t.Optional[t.Type[specifiers.SpecifierSet]] + Version = version.Version # type: t.Optional[t.Type[version.Version]] PACKAGING_IMPORT_ERROR = None except ImportError as ex: SpecifierSet = None # pylint: disable=invalid-name diff --git a/test/lib/ansible_test/_internal/compat/yaml.py b/test/lib/ansible_test/_internal/compat/yaml.py index daa5ef0ed4..e4dbb651b1 100644 --- a/test/lib/ansible_test/_internal/compat/yaml.py +++ b/test/lib/ansible_test/_internal/compat/yaml.py @@ -1,6 +1,8 @@ """PyYAML compatibility.""" from __future__ import annotations +import typing as t + from functools import ( partial, ) @@ -13,7 +15,7 @@ except ImportError as ex: YAML_IMPORT_ERROR = ex else: try: - _SafeLoader = _yaml.CSafeLoader + _SafeLoader = _yaml.CSafeLoader # type: t.Union[t.Type[_yaml.CSafeLoader], t.Type[_yaml.SafeLoader]] except AttributeError: _SafeLoader = _yaml.SafeLoader diff --git a/test/lib/ansible_test/_internal/completion.py b/test/lib/ansible_test/_internal/completion.py index 86674cb2ff..afa437a405 100644 --- a/test/lib/ansible_test/_internal/completion.py +++ b/test/lib/ansible_test/_internal/completion.py @@ -3,6 +3,7 @@ from __future__ import annotations import abc import dataclasses +import enum import os import typing as t @@ -21,6 +22,30 @@ from .data import ( data_context, ) +from .become import ( + SUPPORTED_BECOME_METHODS, +) + + +class CGroupVersion(enum.Enum): + """The control group version(s) required by a container.""" + NONE = 'none' + V1_ONLY = 'v1-only' + V2_ONLY = 'v2-only' + V1_V2 = 'v1-v2' + + def __repr__(self) -> str: + 
return f'{self.__class__.__name__}.{self.name}' + + +class AuditMode(enum.Enum): + """The audit requirements of a container.""" + NONE = 'none' + REQUIRED = 'required' + + def __repr__(self) -> str: + return f'{self.__class__.__name__}.{self.name}' + @dataclasses.dataclass(frozen=True) class CompletionConfig(metaclass=abc.ABCMeta): @@ -79,6 +104,7 @@ class PythonCompletionConfig(PosixCompletionConfig, metaclass=abc.ABCMeta): class RemoteCompletionConfig(CompletionConfig): """Base class for completion configuration of remote environments provisioned through Ansible Core CI.""" provider: t.Optional[str] = None + arch: t.Optional[str] = None @property def platform(self): @@ -99,6 +125,9 @@ class RemoteCompletionConfig(CompletionConfig): if not self.provider: raise Exception(f'Remote completion entry "{self.name}" must provide a "provider" setting.') + if not self.arch: + raise Exception(f'Remote completion entry "{self.name}" must provide a "arch" setting.') + @dataclasses.dataclass(frozen=True) class InventoryCompletionConfig(CompletionConfig): @@ -132,6 +161,8 @@ class DockerCompletionConfig(PythonCompletionConfig): """Configuration for Docker containers.""" image: str = '' seccomp: str = 'default' + cgroup: str = CGroupVersion.V1_V2.value + audit: str = AuditMode.REQUIRED.value # most containers need this, so the default is required, leaving it to be opt-out for containers which don't need it placeholder: bool = False @property @@ -139,6 +170,22 @@ class DockerCompletionConfig(PythonCompletionConfig): """True if the completion entry is only used for defaults, otherwise False.""" return False + @property + def audit_enum(self) -> AuditMode: + """The audit requirements for the container. 
Raises an exception if the value is invalid.""" + try: + return AuditMode(self.audit) + except ValueError: + raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.audit}" for the "audit" setting.') from None + + @property + def cgroup_enum(self) -> CGroupVersion: + """The control group version(s) required by the container. Raises an exception if the value is invalid.""" + try: + return CGroupVersion(self.cgroup) + except ValueError: + raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.cgroup}" for the "cgroup" setting.') from None + def __post_init__(self): if not self.image: raise Exception(f'Docker completion entry "{self.name}" must provide an "image" setting.') @@ -146,20 +193,36 @@ class DockerCompletionConfig(PythonCompletionConfig): if not self.supported_pythons and not self.placeholder: raise Exception(f'Docker completion entry "{self.name}" must provide a "python" setting.') + # verify properties can be correctly parsed to enums + assert self.audit_enum + assert self.cgroup_enum + @dataclasses.dataclass(frozen=True) class NetworkRemoteCompletionConfig(RemoteCompletionConfig): """Configuration for remote network platforms.""" collection: str = '' connection: str = '' + placeholder: bool = False + + def __post_init__(self): + if not self.placeholder: + super().__post_init__() @dataclasses.dataclass(frozen=True) class PosixRemoteCompletionConfig(RemoteCompletionConfig, PythonCompletionConfig): """Configuration for remote POSIX platforms.""" + become: t.Optional[str] = None placeholder: bool = False def __post_init__(self): + if not self.placeholder: + super().__post_init__() + + if self.become and self.become not in SUPPORTED_BECOME_METHODS: + raise Exception(f'POSIX remote completion entry "{self.name}" setting "become" must be omitted or one of: {", ".join(SUPPORTED_BECOME_METHODS)}') + if not self.supported_pythons: if self.version and not self.placeholder: raise Exception(f'POSIX remote 
completion entry "{self.name}" must provide a "python" setting.') @@ -211,9 +274,9 @@ def filter_completion( controller_only=False, # type: bool include_defaults=False, # type: bool ): # type: (...) -> t.Dict[str, TCompletionConfig] - """Return a the given completion dictionary, filtering out configs which do not support the controller if controller_only is specified.""" + """Return the given completion dictionary, filtering out configs which do not support the controller if controller_only is specified.""" if controller_only: - completion = {name: config for name, config in completion.items() if config.controller_supported} + completion = {name: config for name, config in completion.items() if isinstance(config, PosixCompletionConfig) and config.controller_supported} if not include_defaults: completion = {name: config for name, config in completion.items() if not config.is_default} diff --git a/test/lib/ansible_test/_internal/config.py b/test/lib/ansible_test/_internal/config.py index e5c213f772..4061dd8ae5 100644 --- a/test/lib/ansible_test/_internal/config.py +++ b/test/lib/ansible_test/_internal/config.py @@ -1,6 +1,7 @@ """Configuration classes.""" from __future__ import annotations +import dataclasses import enum import os import sys @@ -10,6 +11,7 @@ from .util import ( display, verify_sys_executable, version_to_str, + type_guard, ) from .util_common import ( @@ -47,27 +49,20 @@ class TerminateMode(enum.Enum): return self.name.lower() -class ParsedRemote: - """A parsed version of a "remote" string.""" - def __init__(self, arch, platform, version): # type: (t.Optional[str], str, str) -> None - self.arch = arch - self.platform = platform - self.version = version +@dataclasses.dataclass(frozen=True) +class ModulesConfig: + """Configuration for modules.""" + python_requires: str + python_versions: tuple[str, ...] 
+ controller_only: bool - @staticmethod - def parse(value): # type: (str) -> t.Optional['ParsedRemote'] - """Return a ParsedRemote from the given value or None if the syntax is invalid.""" - parts = value.split('/') - if len(parts) == 2: - arch = None - platform, version = parts - elif len(parts) == 3: - arch, platform, version = parts - else: - return None - - return ParsedRemote(arch, platform, version) +@dataclasses.dataclass(frozen=True) +class ContentConfig: + """Configuration for all content.""" + modules: ModulesConfig + python_versions: tuple[str, ...] + py2_support: bool class EnvironmentConfig(CommonConfig): @@ -81,6 +76,10 @@ class EnvironmentConfig(CommonConfig): self.pypi_proxy = args.pypi_proxy # type: bool self.pypi_endpoint = args.pypi_endpoint # type: t.Optional[str] + # Populated by content_config.get_content_config on the origin. + # Serialized and passed to delegated instances to avoid parsing a second time. + self.content_config = None # type: t.Optional[ContentConfig] + # Set by check_controller_python once HostState has been created by prepare_profiles. # This is here for convenience, to avoid needing to pass HostState to some functions which already have access to EnvironmentConfig. 
self.controller_python = None # type: t.Optional[PythonConfig] @@ -96,7 +95,7 @@ class EnvironmentConfig(CommonConfig): not isinstance(self.controller, OriginConfig) or isinstance(self.controller.python, VirtualPythonConfig) or self.controller.python.version != version_to_str(sys.version_info[:2]) - or verify_sys_executable(self.controller.python.path) + or bool(verify_sys_executable(self.controller.python.path)) ) self.docker_network = args.docker_network # type: t.Optional[str] @@ -112,6 +111,9 @@ class EnvironmentConfig(CommonConfig): self.delegate_args = [] # type: t.List[str] + self.dev_systemd_debug: bool = args.dev_systemd_debug + self.dev_probe_cgroups: t.Optional[str] = args.dev_probe_cgroups + def host_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None """Add the host files to the payload file list.""" config = self @@ -119,9 +121,11 @@ class EnvironmentConfig(CommonConfig): if config.host_path: settings_path = os.path.join(config.host_path, 'settings.dat') state_path = os.path.join(config.host_path, 'state.dat') + config_path = os.path.join(config.host_path, 'config.dat') files.append((os.path.abspath(settings_path), settings_path)) files.append((os.path.abspath(state_path), state_path)) + files.append((os.path.abspath(config_path), config_path)) data_context().register_payload_callback(host_callback) @@ -161,16 +165,14 @@ class EnvironmentConfig(CommonConfig): def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig] """ Return a list of target host configurations. - Requires that there are one or more targets, all of the specified type. + Requires that there are one or more targets, all of the specified type.
""" if not self.targets: raise Exception('There must be one or more targets.') - for target in self.targets: - if not isinstance(target, target_type): - raise Exception(f'Target is {type(target_type)} instead of {target_type}.') + assert type_guard(self.targets, target_type) - return self.targets + return t.cast(t.List[THostConfig], self.targets) @property def target_type(self): # type: () -> t.Type[HostConfig] @@ -218,7 +220,7 @@ class TestConfig(EnvironmentConfig): self.failure_ok = getattr(args, 'failure_ok', False) # type: bool self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata() - self.metadata_path = None + self.metadata_path = None # type: t.Optional[str] if self.coverage_check: self.coverage = True @@ -238,7 +240,12 @@ class ShellConfig(EnvironmentConfig): def __init__(self, args): # type: (t.Any) -> None super().__init__(args, 'shell') + self.cmd = args.cmd # type: t.List[str] self.raw = args.raw # type: bool + self.check_layout = self.delegate # allow shell to be used without a valid layout as long as no delegation is required + self.interactive = sys.stdin.isatty() and not args.cmd # delegation should only be interactive when stdin is a TTY and no command was given + self.export = args.export # type: t.Optional[str] + self.display_stderr = True class SanityConfig(TestConfig): @@ -254,7 +261,7 @@ class SanityConfig(TestConfig): self.keep_git = args.keep_git # type: bool self.prime_venvs = args.prime_venvs # type: bool - self.info_stderr = self.lint + self.display_stderr = self.lint or self.list_tests if self.keep_git: def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None @@ -293,7 +300,7 @@ class IntegrationConfig(TestConfig): if self.list_targets: self.explain = True - self.info_stderr = True + self.display_stderr = True def get_ansible_config(self): # type: () -> str """Return the path to the Ansible config for the given config.""" diff --git a/test/lib/ansible_test/_internal/connections.py 
b/test/lib/ansible_test/_internal/connections.py index ddf4e8df38..f63308e3f8 100644 --- a/test/lib/ansible_test/_internal/connections.py +++ b/test/lib/ansible_test/_internal/connections.py @@ -3,7 +3,6 @@ from __future__ import annotations import abc import shlex -import sys import tempfile import typing as t @@ -17,6 +16,7 @@ from .config import ( from .util import ( Display, + OutputStream, SubprocessError, retry, ) @@ -34,6 +34,7 @@ from .docker_util import ( from .ssh import ( SshConnectionDetail, + ssh_options_to_list, ) from .become import ( @@ -46,10 +47,12 @@ class Connection(metaclass=abc.ABCMeta): @abc.abstractmethod def run(self, command, # type: t.List[str] - capture=False, # type: bool + capture, # type: bool + interactive=False, # type: bool data=None, # type: t.Optional[str] stdin=None, # type: t.Optional[t.IO[bytes]] stdout=None, # type: t.Optional[t.IO[bytes]] + output_stream=None, # type: t.Optional[OutputStream] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified command and return the result.""" @@ -58,11 +61,9 @@ class Connection(metaclass=abc.ABCMeta): src, # type: t.IO[bytes] ): """Extract the given archive file stream in the specified directory.""" - # This will not work on AIX. - # However, AIX isn't supported as a controller, which is where this would be needed. tar_cmd = ['tar', 'oxzf', '-', '-C', chdir] - retry(lambda: self.run(tar_cmd, stdin=src)) + retry(lambda: self.run(tar_cmd, stdin=src, capture=True)) def create_archive(self, chdir, # type: str @@ -75,18 +76,16 @@ class Connection(metaclass=abc.ABCMeta): gzip_cmd = ['gzip'] if exclude: - # This will not work on AIX. - # However, AIX isn't supported as a controller, which is where this would be needed. tar_cmd += ['--exclude', exclude] tar_cmd.append(name) - # Using gzip to compress the archive allows this to work on all POSIX systems we support, including AIX. 
+ # Using gzip to compress the archive allows this to work on all POSIX systems we support. commands = [tar_cmd, gzip_cmd] sh_cmd = ['sh', '-c', ' | '.join(' '.join(shlex.quote(cmd) for cmd in command) for command in commands)] - retry(lambda: self.run(sh_cmd, stdout=dst)) + retry(lambda: self.run(sh_cmd, stdout=dst, capture=True)) class LocalConnection(Connection): @@ -96,10 +95,12 @@ class LocalConnection(Connection): def run(self, command, # type: t.List[str] - capture=False, # type: bool + capture, # type: bool + interactive=False, # type: bool data=None, # type: t.Optional[str] stdin=None, # type: t.Optional[t.IO[bytes]] stdout=None, # type: t.Optional[t.IO[bytes]] + output_stream=None, # type: t.Optional[OutputStream] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified command and return the result.""" return run_command( @@ -109,6 +110,8 @@ class LocalConnection(Connection): data=data, stdin=stdin, stdout=stdout, + interactive=interactive, + output_stream=output_stream, ) @@ -121,7 +124,7 @@ class SshConnection(Connection): self.options = ['-i', settings.identity_file] - ssh_options = dict( + ssh_options: dict[str, t.Union[int, str]] = dict( BatchMode='yes', StrictHostKeyChecking='no', UserKnownHostsFile='/dev/null', @@ -129,15 +132,18 @@ class SshConnection(Connection): ServerAliveCountMax=4, ) - for ssh_option in sorted(ssh_options): - self.options.extend(['-o', f'{ssh_option}={ssh_options[ssh_option]}']) + ssh_options.update(settings.options) + + self.options.extend(ssh_options_to_list(ssh_options)) def run(self, command, # type: t.List[str] - capture=False, # type: bool + capture, # type: bool + interactive=False, # type: bool data=None, # type: t.Optional[str] stdin=None, # type: t.Optional[t.IO[bytes]] stdout=None, # type: t.Optional[t.IO[bytes]] + output_stream=None, # type: t.Optional[OutputStream] ): # type: (...) 
-> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified command and return the result.""" options = list(self.options) @@ -147,7 +153,7 @@ class SshConnection(Connection): options.append('-q') - if not data and not stdin and not stdout and sys.stdin.isatty(): + if interactive: options.append('-tt') with tempfile.NamedTemporaryFile(prefix='ansible-test-ssh-debug-', suffix='.log') as ssh_logfile: @@ -170,6 +176,8 @@ class SshConnection(Connection): data=data, stdin=stdin, stdout=stdout, + interactive=interactive, + output_stream=output_stream, error_callback=error_callback, ) @@ -212,10 +220,12 @@ class DockerConnection(Connection): def run(self, command, # type: t.List[str] - capture=False, # type: bool + capture, # type: bool + interactive=False, # type: bool data=None, # type: t.Optional[str] stdin=None, # type: t.Optional[t.IO[bytes]] stdout=None, # type: t.Optional[t.IO[bytes]] + output_stream=None, # type: t.Optional[OutputStream] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified command and return the result.""" options = [] @@ -223,7 +233,7 @@ class DockerConnection(Connection): if self.user: options.extend(['--user', self.user]) - if not data and not stdin and not stdout and sys.stdin.isatty(): + if interactive: options.append('-it') return docker_exec( @@ -235,6 +245,8 @@ class DockerConnection(Connection): data=data, stdin=stdin, stdout=stdout, + interactive=interactive, + output_stream=output_stream, ) def inspect(self): # type: () -> DockerInspect diff --git a/test/lib/ansible_test/_internal/constants.py b/test/lib/ansible_test/_internal/constants.py index cac7240872..609e3cfcfd 100644 --- a/test/lib/ansible_test/_internal/constants.py +++ b/test/lib/ansible_test/_internal/constants.py @@ -6,6 +6,8 @@ from .._util.target.common.constants import ( REMOTE_ONLY_PYTHON_VERSIONS, ) +STATUS_HOST_CONNECTION_ERROR = 4 + # Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 
2.x when close_fds=True. # This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time. SOFT_RLIMIT_NOFILE = 1024 diff --git a/test/lib/ansible_test/_internal/containers.py b/test/lib/ansible_test/_internal/containers.py index 7ffbfb4c20..e5fe8e94d2 100644 --- a/test/lib/ansible_test/_internal/containers.py +++ b/test/lib/ansible_test/_internal/containers.py @@ -15,7 +15,6 @@ from .util import ( ApplicationError, SubprocessError, display, - get_host_ip, sanitize_host_name, ) @@ -35,15 +34,19 @@ from .config import ( from .docker_util import ( ContainerNotFoundError, DockerInspect, + docker_create, docker_exec, docker_inspect, + docker_network_inspect, docker_pull, docker_rm, docker_run, docker_start, get_docker_container_id, get_docker_host_ip, + get_podman_host_ip, require_docker, + detect_host_properties, ) from .ansible_util import ( @@ -80,6 +83,10 @@ from .connections import ( SshConnection, ) +from .thread import ( + mutex, +) + # information about support containers provisioned by the current ansible-test instance support_containers = {} # type: t.Dict[str, ContainerDescriptor] support_containers_mutex = threading.Lock() @@ -103,7 +110,7 @@ def run_support_container( args, # type: EnvironmentConfig context, # type: str image, # type: str - name, # type: name + name, # type: str ports, # type: t.List[int] aliases=None, # type: t.Optional[t.List[str]] start=True, # type: bool @@ -138,10 +145,10 @@ def run_support_container( if current_container_id: publish_ports = False # publishing ports is pointless if already running in a docker container - options = (options or []) + ['--name', name] + options = (options or []) if start: - options.append('-d') + options.append('-dt') # the -t option is required to cause systemd in the container to log output to the console if publish_ports: for port in ports: @@ -151,6 +158,10 @@ def run_support_container( for key, value in env.items(): 
options.extend(['--env', '%s=%s' % (key, value)]) + max_open_files = detect_host_properties(args).max_open_files + + options.extend(['--ulimit', 'nofile=%s' % max_open_files]) + support_container_id = None if allow_existing: @@ -175,6 +186,9 @@ def run_support_container( if not support_container_id: docker_rm(args, name) + if args.dev_systemd_debug: + options.extend(('--env', 'SYSTEMD_LOG_LEVEL=debug')) + if support_container_id: display.info('Using existing "%s" container.' % name) running = True @@ -182,7 +196,7 @@ def run_support_container( else: display.info('Starting new "%s" container.' % name) docker_pull(args, image) - support_container_id = docker_run(args, image, options, create_only=not start, cmd=cmd) + support_container_id = run_container(args, image, name, options, create_only=not start, cmd=cmd) running = start existing = False @@ -220,10 +234,130 @@ def run_support_container( return descriptor +def run_container( + args: EnvironmentConfig, + image: str, + name: str, + options: t.Optional[list[str]], + cmd: t.Optional[list[str]] = None, + create_only: bool = False, +) -> str: + """Run a container using the given docker image.""" + options = list(options or []) + cmd = list(cmd or []) + + options.extend(['--name', name]) + + network = get_docker_preferred_network_name(args) + + if is_docker_user_defined_network(network): + # Only when the network is not the default bridge network. + options.extend(['--network', network]) + + for _iteration in range(1, 3): + try: + if create_only: + stdout = docker_create(args, image, options, cmd)[0] + else: + stdout = docker_run(args, image, options, cmd)[0] + except SubprocessError as ex: + display.error(ex.message) + display.warning(f'Failed to run docker image "{image}". 
Waiting a few seconds before trying again.') + docker_rm(args, name) # podman doesn't remove containers after create if run fails + time.sleep(3) + else: + if args.explain: + stdout = ''.join(random.choice('0123456789abcdef') for _iteration in range(64)) + + return stdout.strip() + + raise ApplicationError(f'Failed to run docker image "{image}".') + + +def start_container(args: EnvironmentConfig, container_id: str) -> tuple[t.Optional[str], t.Optional[str]]: + """Start a docker container by name or ID.""" + options: list[str] = [] + + for _iteration in range(1, 3): + try: + return docker_start(args, container_id, options) + except SubprocessError as ex: + display.error(ex.message) + display.warning(f'Failed to start docker container "{container_id}". Waiting a few seconds before trying again.') + time.sleep(3) + + raise ApplicationError(f'Failed to start docker container "{container_id}".') + + +def get_container_ip_address(args: EnvironmentConfig, container: DockerInspect) -> t.Optional[str]: + """Return the IP address of the container for the preferred docker network.""" + if container.networks: + network_name = get_docker_preferred_network_name(args) + + if not network_name: + # Sort networks and use the first available. + # This assumes all containers will have access to the same networks. + network_name = sorted(container.networks.keys()).pop(0) + + ipaddress = container.networks[network_name]['IPAddress'] + else: + ipaddress = container.network_settings['IPAddress'] + + if not ipaddress: + return None + + return ipaddress + + +@mutex +def get_docker_preferred_network_name(args: EnvironmentConfig) -> t.Optional[str]: + """ + Return the preferred network name for use with Docker. 
The selection logic is: + - the network selected by the user with `--docker-network` + - the network of the currently running docker container (if any) + - the default docker network (returns None) + """ + try: + return get_docker_preferred_network_name.network # type: ignore[attr-defined] + except AttributeError: + pass + + network = None + + if args.docker_network: + network = args.docker_network + else: + current_container_id = get_docker_container_id() + + if current_container_id: + # Make sure any additional containers we launch use the same network as the current container we're running in. + # This is needed when ansible-test is running in a container that is not connected to Docker's default network. + container = docker_inspect(args, current_container_id, always=True) + network = container.get_network_name() + + # The default docker behavior puts containers on the same network. + # The default podman behavior puts containers on isolated networks which don't allow communication between containers or network disconnect. + # Starting with podman version 2.1.0 rootless containers are able to join networks. + # Starting with podman version 2.2.0 containers can be disconnected from networks. + # To maintain feature parity with docker, detect and use the default "podman" network when running under podman. 
+ if network is None and require_docker().command == 'podman' and docker_network_inspect(args, 'podman', always=True): + network = 'podman' + + get_docker_preferred_network_name.network = network # type: ignore[attr-defined] + + return network + + +def is_docker_user_defined_network(network: str) -> bool: + """Return True if the network being used is a user-defined network.""" + return bool(network) and network != 'bridge' + + +@mutex def get_container_database(args): # type: (EnvironmentConfig) -> ContainerDatabase """Return the current container database, creating it as needed, or returning the one provided on the command line through delegation.""" try: - return get_container_database.database + return get_container_database.database # type: ignore[attr-defined] except AttributeError: pass @@ -236,9 +370,9 @@ def get_container_database(args): # type: (EnvironmentConfig) -> ContainerDatab display.info('>>> Container Database\n%s' % json.dumps(database.to_dict(), indent=4, sort_keys=True), verbosity=3) - get_container_database.database = database + get_container_database.database = database # type: ignore[attr-defined] - return get_container_database.database + return database class ContainerAccess: @@ -286,7 +420,7 @@ class ContainerAccess: def to_dict(self): # type: () -> t.Dict[str, t.Any] """Return a dict of the current instance.""" - value = dict( + value: t.Dict[str, t.Any] = dict( host_ip=self.host_ip, names=self.names, ) @@ -350,8 +484,12 @@ def create_container_database(args): # type: (EnvironmentConfig) -> ContainerDa for name, container in support_containers.items(): if container.details.published_ports: + if require_docker().command == 'podman': + host_ip_func = get_podman_host_ip + else: + host_ip_func = get_docker_host_ip published_access = ContainerAccess( - host_ip=get_docker_host_ip(), + host_ip=host_ip_func(), names=container.aliases, ports=None, forwards=dict((port, published_port) for port, published_port in 
container.details.published_ports.items()), @@ -370,7 +508,7 @@ def create_container_database(args): # type: (EnvironmentConfig) -> ContainerDa elif require_docker().command == 'podman': # published ports for rootless podman containers should be accessible from the host's IP container_access = ContainerAccess( - host_ip=get_host_ip(), + host_ip=get_podman_host_ip(), names=container.aliases, ports=None, forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()), @@ -457,7 +595,7 @@ class SupportContainerContext: def support_container_context( args, # type: EnvironmentConfig ssh, # type: t.Optional[SshConnectionDetail] -): # type: (...) -> t.Optional[ContainerDatabase] +): # type: (...) -> t.Iterator[t.Optional[ContainerDatabase]] """Create a context manager for integration tests that use support containers.""" if not isinstance(args, (IntegrationConfig, UnitsConfig, SanityConfig, ShellConfig)): yield None # containers are only needed for commands that have targets (hosts or pythons) @@ -514,7 +652,7 @@ def create_support_container_context( try: port_forwards = process.collect_port_forwards() - contexts = {} + contexts = {} # type: t.Dict[str, t.Dict[str, ContainerAccess]] for forward, forwarded_port in port_forwards.items(): access_host, access_port = forward @@ -567,7 +705,7 @@ class ContainerDescriptor: def start(self, args): # type: (EnvironmentConfig) -> None """Start the container. 
Used for containers which are created, but not started.""" - docker_start(args, self.name) + start_container(args, self.name) self.register(args) @@ -577,7 +715,7 @@ class ContainerDescriptor: raise Exception('Container already registered: %s' % self.name) try: - container = docker_inspect(args, self.container_id) + container = docker_inspect(args, self.name) except ContainerNotFoundError: if not args.explain: raise @@ -594,7 +732,7 @@ class ContainerDescriptor: ), )) - support_container_ip = container.get_ip_address() + support_container_ip = get_container_ip_address(args, container) if self.publish_ports: # inspect the support container to locate the published ports @@ -659,7 +797,7 @@ def cleanup_containers(args): # type: (EnvironmentConfig) -> None if container.cleanup == CleanupMode.YES: docker_rm(args, container.container_id) elif container.cleanup == CleanupMode.INFO: - display.notice('Remember to run `docker rm -f %s` when finished testing.' % container.name) + display.notice(f'Remember to run `{require_docker().command} rm -f {container.name}` when finished testing.') def create_hosts_entries(context): # type: (t.Dict[str, ContainerAccess]) -> t.List[str] @@ -702,8 +840,8 @@ def create_container_hooks( else: managed_type = 'posix' - control_state = {} - managed_state = {} + control_state = {} # type: t.Dict[str, t.Tuple[t.List[str], t.List[SshProcess]]] + managed_state = {} # type: t.Dict[str, t.Tuple[t.List[str], t.List[SshProcess]]] def pre_target(target): """Configure hosts for SSH port forwarding required by the specified target.""" @@ -722,7 +860,7 @@ def create_container_hooks( def create_managed_contexts(control_contexts): # type: (t.Dict[str, t.Dict[str, ContainerAccess]]) -> t.Dict[str, t.Dict[str, ContainerAccess]] """Create managed contexts from the given control contexts.""" - managed_contexts = {} + managed_contexts = {} # type: t.Dict[str, t.Dict[str, ContainerAccess]] for context_name, control_context in control_contexts.items(): 
managed_context = managed_contexts[context_name] = {} @@ -789,8 +927,8 @@ def forward_ssh_ports( hosts_entries = create_hosts_entries(test_context) inventory = generate_ssh_inventory(ssh_connections) - with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: - run_playbook(args, inventory_path, playbook, dict(hosts_entries=hosts_entries)) + with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: # type: str + run_playbook(args, inventory_path, playbook, capture=False, variables=dict(hosts_entries=hosts_entries)) ssh_processes = [] # type: t.List[SshProcess] @@ -822,8 +960,8 @@ def cleanup_ssh_ports( inventory = generate_ssh_inventory(ssh_connections) - with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: - run_playbook(args, inventory_path, playbook, dict(hosts_entries=hosts_entries)) + with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: # type: str + run_playbook(args, inventory_path, playbook, capture=False, variables=dict(hosts_entries=hosts_entries)) if ssh_processes: for process in ssh_processes: diff --git a/test/lib/ansible_test/_internal/content_config.py b/test/lib/ansible_test/_internal/content_config.py index 10574cc0b6..39a8d4125c 100644 --- a/test/lib/ansible_test/_internal/content_config.py +++ b/test/lib/ansible_test/_internal/content_config.py @@ -2,6 +2,7 @@ from __future__ import annotations import os +import pickle import typing as t from .constants import ( @@ -21,6 +22,7 @@ from .compat.yaml import ( ) from .io import ( + open_binary_file, read_text_file, ) @@ -28,54 +30,59 @@ from .util import ( ApplicationError, display, str_to_version, - cache, ) from .data import ( data_context, ) +from .config import ( + EnvironmentConfig, + ContentConfig, + ModulesConfig, +) MISSING = object() -class BaseConfig: - """Base class for content configuration.""" - def __init__(self, data): # type: 
(t.Any) -> None - if not isinstance(data, dict): - raise Exception('config must be type `dict` not `%s`' % type(data)) - +def parse_modules_config(data: t.Any) -> ModulesConfig: + """Parse the given dictionary as module config and return it.""" + if not isinstance(data, dict): + raise Exception('config must be type `dict` not `%s`' % type(data)) -class ModulesConfig(BaseConfig): - """Configuration for modules.""" - def __init__(self, data): # type: (t.Any) -> None - super().__init__(data) + python_requires = data.get('python_requires', MISSING) - python_requires = data.get('python_requires', MISSING) + if python_requires == MISSING: + raise KeyError('python_requires is required') - if python_requires == MISSING: - raise KeyError('python_requires is required') + return ModulesConfig( + python_requires=python_requires, + python_versions=parse_python_requires(python_requires), + controller_only=python_requires == 'controller', + ) - self.python_requires = python_requires - self.python_versions = parse_python_requires(python_requires) - self.controller_only = python_requires == 'controller' +def parse_content_config(data: t.Any) -> ContentConfig: + """Parse the given dictionary as content config and return it.""" + if not isinstance(data, dict): + raise Exception('config must be type `dict` not `%s`' % type(data)) -class ContentConfig(BaseConfig): - """Configuration for all content.""" - def __init__(self, data): # type: (t.Any) -> None - super().__init__(data) + # Configuration specific to modules/module_utils. + modules = parse_modules_config(data.get('modules', {})) - # Configuration specific to modules/module_utils. - self.modules = ModulesConfig(data.get('modules', {})) + # Python versions supported by the controller, combined with Python versions supported by modules/module_utils. + # Mainly used for display purposes and to limit the Python versions used for sanity tests. 
+ python_versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS + if version in CONTROLLER_PYTHON_VERSIONS or version in modules.python_versions) - # Python versions supported by the controller, combined with Python versions supported by modules/module_utils. - # Mainly used for display purposes and to limit the Python versions used for sanity tests. - self.python_versions = [version for version in SUPPORTED_PYTHON_VERSIONS - if version in CONTROLLER_PYTHON_VERSIONS or version in self.modules.python_versions] + # True if Python 2.x is supported. + py2_support = any(version for version in python_versions if str_to_version(version)[0] == 2) - # True if Python 2.x is supported. - self.py2_support = any(version for version in self.python_versions if str_to_version(version)[0] == 2) + return ContentConfig( + modules=modules, + python_versions=python_versions, + py2_support=py2_support, + ) def load_config(path): # type: (str) -> t.Optional[ContentConfig] @@ -95,7 +102,7 @@ def load_config(path): # type: (str) -> t.Optional[ContentConfig] return None try: - config = ContentConfig(yaml_value) + config = parse_content_config(yaml_value) except Exception as ex: # pylint: disable=broad-except display.warning('Ignoring config "%s" due a config parsing error: %s' % (path, ex)) return None @@ -105,13 +112,18 @@ def load_config(path): # type: (str) -> t.Optional[ContentConfig] return config -@cache -def get_content_config(): # type: () -> ContentConfig +def get_content_config(args): # type: (EnvironmentConfig) -> ContentConfig """ Parse and return the content configuration (if any) for the current collection. For ansible-core, a default configuration is used. Results are cached. 
""" + if args.host_path: + args.content_config = deserialize_content_config(os.path.join(args.host_path, 'config.dat')) + + if args.content_config: + return args.content_config + collection_config_path = 'tests/config.yml' config = None @@ -120,7 +132,7 @@ def get_content_config(): # type: () -> ContentConfig config = load_config(collection_config_path) if not config: - config = ContentConfig(dict( + config = parse_content_config(dict( modules=dict( python_requires='default', ), @@ -132,20 +144,36 @@ def get_content_config(): # type: () -> ContentConfig 'This collection provides the Python requirement: %s' % ( ', '.join(SUPPORTED_PYTHON_VERSIONS), config.modules.python_requires)) + args.content_config = config + return config -def parse_python_requires(value): # type: (t.Any) -> t.List[str] +def parse_python_requires(value): # type: (t.Any) -> tuple[str, ...] """Parse the given 'python_requires' version specifier and return the matching Python versions.""" if not isinstance(value, str): raise ValueError('python_requires must must be of type `str` not type `%s`' % type(value)) + versions: tuple[str, ...] + if value == 'default': - versions = list(SUPPORTED_PYTHON_VERSIONS) + versions = SUPPORTED_PYTHON_VERSIONS elif value == 'controller': - versions = list(CONTROLLER_PYTHON_VERSIONS) + versions = CONTROLLER_PYTHON_VERSIONS else: specifier_set = SpecifierSet(value) - versions = [version for version in SUPPORTED_PYTHON_VERSIONS if specifier_set.contains(Version(version))] + versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS if specifier_set.contains(Version(version))) return versions + + +def serialize_content_config(args: EnvironmentConfig, path: str) -> None: + """Serialize the content config to the given path. 
If the config has not been loaded, an empty config will be serialized.""" + with open_binary_file(path, 'wb') as config_file: + pickle.dump(args.content_config, config_file) + + +def deserialize_content_config(path: str) -> ContentConfig: + """Deserialize content config from the path.""" + with open_binary_file(path) as config_file: + return pickle.load(config_file) diff --git a/test/lib/ansible_test/_internal/core_ci.py b/test/lib/ansible_test/_internal/core_ci.py index 023b5655aa..62d063b2b7 100644 --- a/test/lib/ansible_test/_internal/core_ci.py +++ b/test/lib/ansible_test/_internal/core_ci.py @@ -1,6 +1,8 @@ """Access Ansible Core CI remote services.""" from __future__ import annotations +import abc +import dataclasses import json import os import re @@ -48,6 +50,65 @@ from .data import ( ) +@dataclasses.dataclass(frozen=True) +class Resource(metaclass=abc.ABCMeta): + """Base class for Ansible Core CI resources.""" + @abc.abstractmethod + def as_tuple(self) -> t.Tuple[str, str, str, str]: + """Return the resource as a tuple of platform, version, architecture and provider.""" + + @abc.abstractmethod + def get_label(self) -> str: + """Return a user-friendly label for this resource.""" + + @property + @abc.abstractmethod + def persist(self) -> bool: + """True if the resource is persistent, otherwise false.""" + + +@dataclasses.dataclass(frozen=True) +class VmResource(Resource): + """Details needed to request a VM from Ansible Core CI.""" + platform: str + version: str + architecture: str + provider: str + tag: str + + def as_tuple(self) -> t.Tuple[str, str, str, str]: + """Return the resource as a tuple of platform, version, architecture and provider.""" + return self.platform, self.version, self.architecture, self.provider + + def get_label(self) -> str: + """Return a user-friendly label for this resource.""" + return f'{self.platform} {self.version} ({self.architecture}) [{self.tag}] @{self.provider}' + + @property + def persist(self) -> bool: + """True if the 
resource is persistent, otherwise false.""" + return True + + +@dataclasses.dataclass(frozen=True) +class CloudResource(Resource): + """Details needed to request cloud credentials from Ansible Core CI.""" + platform: str + + def as_tuple(self) -> t.Tuple[str, str, str, str]: + """Return the resource as a tuple of platform, version, architecture and provider.""" + return self.platform, '', '', self.platform + + def get_label(self) -> str: + """Return a user-friendly label for this resource.""" + return self.platform + + @property + def persist(self) -> bool: + """True if the resource is persistent, otherwise false.""" + return False + + class AnsibleCoreCI: """Client for Ansible Core CI services.""" DEFAULT_ENDPOINT = 'https://ansible-core-ci.testing.ansible.com' @@ -55,16 +116,12 @@ class AnsibleCoreCI: def __init__( self, args, # type: EnvironmentConfig - platform, # type: str - version, # type: str - provider, # type: str - persist=True, # type: bool + resource, # type: Resource load=True, # type: bool - suffix=None, # type: t.Optional[str] ): # type: (...) 
-> None self.args = args - self.platform = platform - self.version = version + self.resource = resource + self.platform, self.version, self.arch, self.provider = self.resource.as_tuple() self.stage = args.remote_stage self.client = HttpClient(args) self.connection = None @@ -73,41 +130,39 @@ class AnsibleCoreCI: self.default_endpoint = args.remote_endpoint or self.DEFAULT_ENDPOINT self.retries = 3 self.ci_provider = get_ci_provider() - self.provider = provider - self.name = '%s-%s' % (self.platform, self.version) + self.label = self.resource.get_label() - if suffix: - self.name += '-' + suffix + stripped_label = re.sub('[^A-Za-z0-9_.]+', '-', self.label).strip('-') - self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s-%s' % (self.name, self.provider, self.stage)) + self.name = f"{stripped_label}-{self.stage}" # turn the label into something suitable for use as a filename + + self.path = os.path.expanduser(f'~/.ansible/test/instances/{self.name}') self.ssh_key = SshKey(args) - if persist and load and self._load(): + if self.resource.persist and load and self._load(): try: - display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Checking existing {self.label} instance using: {self._uri}', verbosity=1) self.connection = self.get(always_raise_on=[404]) - display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1) + display.info(f'Loaded existing {self.label} instance.', verbosity=1) except HttpError as ex: if ex.status != 404: raise self._clear() - display.info('Cleared stale %s/%s instance %s.' 
% (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Cleared stale {self.label} instance.', verbosity=1) self.instance_id = None self.endpoint = None - elif not persist: + elif not self.resource.persist: self.instance_id = None self.endpoint = None self._clear() if self.instance_id: - self.started = True + self.started = True # type: bool else: self.started = False self.instance_id = str(uuid.uuid4()) @@ -126,8 +181,7 @@ class AnsibleCoreCI: def start(self): """Start instance.""" if self.started: - display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Skipping started {self.label} instance.', verbosity=1) return None return self._start(self.ci_provider.prepare_core_ci_auth()) @@ -135,22 +189,19 @@ class AnsibleCoreCI: def stop(self): """Stop instance.""" if not self.started: - display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Skipping invalid {self.label} instance.', verbosity=1) return response = self.client.delete(self._uri) if response.status_code == 404: self._clear() - display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Cleared invalid {self.label} instance.', verbosity=1) return if response.status_code == 200: self._clear() - display.info('Stopped running %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Stopped running {self.label} instance.', verbosity=1) return raise self._create_http_error(response) @@ -158,8 +209,7 @@ class AnsibleCoreCI: def get(self, tries=3, sleep=15, always_raise_on=None): # type: (int, int, t.Optional[t.List[int]]) -> t.Optional[InstanceConnection] """Get instance connection information.""" if not self.started: - display.info('Skipping invalid %s/%s instance %s.' 
% (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Skipping invalid {self.label} instance.', verbosity=1) return None if not always_raise_on: @@ -180,7 +230,7 @@ class AnsibleCoreCI: if not tries or response.status_code in always_raise_on: raise error - display.warning('%s. Trying again after %d seconds.' % (error, sleep)) + display.warning(f'{error}. Trying again after {sleep} seconds.') time.sleep(sleep) if self.args.explain: @@ -216,9 +266,7 @@ class AnsibleCoreCI: status = 'running' if self.connection.running else 'starting' - display.info('Status update: %s/%s on instance %s is %s.' % - (self.platform, self.version, self.instance_id, status), - verbosity=1) + display.info(f'The {self.label} instance is {status}.', verbosity=1) return self.connection @@ -229,16 +277,15 @@ class AnsibleCoreCI: return time.sleep(10) - raise ApplicationError('Timeout waiting for %s/%s instance %s.' % - (self.platform, self.version, self.instance_id)) + raise ApplicationError(f'Timeout waiting for {self.label} instance.') @property def _uri(self): - return '%s/%s/%s/%s' % (self.endpoint, self.stage, self.provider, self.instance_id) + return f'{self.endpoint}/{self.stage}/{self.provider}/{self.instance_id}' def _start(self, auth): """Start instance.""" - display.info('Initializing new %s/%s instance %s.' 
% (self.platform, self.version, self.instance_id), verbosity=1) + display.info(f'Initializing new {self.label} instance using: {self._uri}', verbosity=1) if self.platform == 'windows': winrm_config = read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'ConfigureRemotingForAnsible.ps1')) @@ -249,6 +296,7 @@ class AnsibleCoreCI: config=dict( platform=self.platform, version=self.version, + architecture=self.arch, public_key=self.ssh_key.pub_contents, query=False, winrm_config=winrm_config, @@ -266,7 +314,7 @@ class AnsibleCoreCI: self.started = True self._save() - display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1) + display.info(f'Started {self.label} instance.', verbosity=1) if self.args.explain: return {} @@ -277,8 +325,6 @@ class AnsibleCoreCI: tries = self.retries sleep = 15 - display.info('Trying endpoint: %s' % self.endpoint, verbosity=1) - while True: tries -= 1 response = self.client.put(self._uri, data=json.dumps(data), headers=headers) @@ -294,7 +340,7 @@ class AnsibleCoreCI: if not tries: raise error - display.warning('%s. Trying again after %d seconds.' % (error, sleep)) + display.warning(f'{error}. 
Trying again after {sleep} seconds.') time.sleep(sleep) def _clear(self): @@ -345,14 +391,14 @@ class AnsibleCoreCI: def save(self): # type: () -> t.Dict[str, str] """Save instance details and return as a dictionary.""" return dict( - platform_version='%s/%s' % (self.platform, self.version), + label=self.resource.get_label(), instance_id=self.instance_id, endpoint=self.endpoint, ) @staticmethod def _create_http_error(response): # type: (HttpResponse) -> ApplicationError - """Return an exception created from the given HTTP resposne.""" + """Return an exception created from the given HTTP response.""" response_json = response.json() stack_trace = '' @@ -369,7 +415,7 @@ class AnsibleCoreCI: traceback_lines = traceback.format_list(traceback_lines) trace = '\n'.join([x.rstrip() for x in traceback_lines]) - stack_trace = ('\nTraceback (from remote server):\n%s' % trace) + stack_trace = f'\nTraceback (from remote server):\n{trace}' else: message = str(response_json) @@ -379,7 +425,7 @@ class AnsibleCoreCI: class CoreHttpError(HttpError): """HTTP response as an error.""" def __init__(self, status, remote_message, remote_stack_trace): # type: (int, str, str) -> None - super().__init__(status, '%s%s' % (remote_message, remote_stack_trace)) + super().__init__(status, f'{remote_message}{remote_stack_trace}') self.remote_message = remote_message self.remote_stack_trace = remote_stack_trace @@ -388,8 +434,8 @@ class CoreHttpError(HttpError): class SshKey: """Container for SSH key used to connect to remote instances.""" KEY_TYPE = 'rsa' # RSA is used to maintain compatibility with paramiko and EC2 - KEY_NAME = 'id_%s' % KEY_TYPE - PUB_NAME = '%s.pub' % KEY_NAME + KEY_NAME = f'id_{KEY_TYPE}' + PUB_NAME = f'{KEY_NAME}.pub' @mutex def __init__(self, args): # type: (EnvironmentConfig) -> None @@ -469,7 +515,7 @@ class SshKey: make_dirs(os.path.dirname(key)) if not os.path.isfile(key) or not os.path.isfile(pub): - run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', 
self.KEY_TYPE, '-N', '', '-f', key]) + run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', self.KEY_TYPE, '-N', '', '-f', key], capture=True) if args.explain: return key, pub @@ -502,6 +548,6 @@ class InstanceConnection: def __str__(self): if self.password: - return '%s:%s [%s:%s]' % (self.hostname, self.port, self.username, self.password) + return f'{self.hostname}:{self.port} [{self.username}:{self.password}]' - return '%s:%s [%s]' % (self.hostname, self.port, self.username) + return f'{self.hostname}:{self.port} [{self.username}]' diff --git a/test/lib/ansible_test/_internal/coverage_util.py b/test/lib/ansible_test/_internal/coverage_util.py index e705db76e0..869a3a3a72 100644 --- a/test/lib/ansible_test/_internal/coverage_util.py +++ b/test/lib/ansible_test/_internal/coverage_util.py @@ -41,6 +41,10 @@ from .host_configs import ( PythonConfig, ) +from .thread import ( + mutex, +) + def cover_python( args, # type: TestConfig @@ -48,7 +52,7 @@ def cover_python( cmd, # type: t.List[str] target_name, # type: str env, # type: t.Dict[str, str] - capture=False, # type: bool + capture, # type: bool data=None, # type: t.Optional[str] cwd=None, # type: t.Optional[str] ): # type: (...) 
-> t.Tuple[t.Optional[str], t.Optional[str]] @@ -107,10 +111,11 @@ def get_coverage_environment( return env +@mutex def get_coverage_config(args): # type: (TestConfig) -> str """Return the path to the coverage config, creating the config if it does not already exist.""" try: - return get_coverage_config.path + return get_coverage_config.path # type: ignore[attr-defined] except AttributeError: pass @@ -122,11 +127,13 @@ def get_coverage_config(args): # type: (TestConfig) -> str temp_dir = tempfile.mkdtemp() atexit.register(lambda: remove_tree(temp_dir)) - path = get_coverage_config.path = os.path.join(temp_dir, COVERAGE_CONFIG_NAME) + path = os.path.join(temp_dir, COVERAGE_CONFIG_NAME) if not args.explain: write_text_file(path, coverage_config) + get_coverage_config.path = path # type: ignore[attr-defined] + return path diff --git a/test/lib/ansible_test/_internal/data.py b/test/lib/ansible_test/_internal/data.py index c3b2187ca2..42fa5a2ac7 100644 --- a/test/lib/ansible_test/_internal/data.py +++ b/test/lib/ansible_test/_internal/data.py @@ -9,6 +9,7 @@ from .util import ( ApplicationError, import_plugins, is_subdir, + is_valid_identifier, ANSIBLE_LIB_ROOT, ANSIBLE_TEST_ROOT, ANSIBLE_SOURCE_ROOT, @@ -34,11 +35,19 @@ from .provider.source.installed import ( InstalledSource, ) +from .provider.source.unsupported import ( + UnsupportedSource, +) + from .provider.layout import ( ContentLayout, LayoutProvider, ) +from .provider.layout.unsupported import ( + UnsupportedLayout, +) + class DataContext: """Data context providing details about the current execution environment for ansible-test.""" @@ -109,14 +118,20 @@ class DataContext: walk, # type: bool ): # type: (...) 
-> ContentLayout """Create a content layout using the given providers and root path.""" - layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk) + try: + layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk) + except ProviderNotFoundForPath: + layout_provider = UnsupportedLayout(root) try: # Begin the search for the source provider at the layout provider root. # This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error. # Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project. # It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control. - source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk) + if isinstance(layout_provider, UnsupportedLayout): + source_provider = UnsupportedSource(layout_provider.root) # type: SourceProvider + else: + source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk) except ProviderNotFoundForPath: source_provider = UnversionedSource(layout_provider.root) @@ -161,6 +176,48 @@ class DataContext: """Register the given payload callback.""" self.payload_callbacks.append(callback) + def check_layout(self) -> None: + """Report an error if the layout is unsupported.""" + if self.content.unsupported: + raise ApplicationError(self.explain_working_directory()) + + def explain_working_directory(self) -> str: + """Return a message explaining the working directory requirements.""" + blocks = [ + 'The current working directory must be within the source tree being tested.', + '', + ] + + if ANSIBLE_SOURCE_ROOT: + blocks.append(f'Testing Ansible: {ANSIBLE_SOURCE_ROOT}/') + blocks.append('') + + cwd = os.getcwd() + + blocks.append('Testing an Ansible collection: 
{...}/ansible_collections/{namespace}/{collection}/') + blocks.append('Example #1: community.general -> ~/code/ansible_collections/community/general/') + blocks.append('Example #2: ansible.util -> ~/.ansible/collections/ansible_collections/ansible/util/') + blocks.append('') + blocks.append(f'Current working directory: {cwd}/') + + if os.path.basename(os.path.dirname(cwd)) == 'ansible_collections': + blocks.append(f'Expected parent directory: {os.path.dirname(cwd)}/{{namespace}}/{{collection}}/') + elif os.path.basename(cwd) == 'ansible_collections': + blocks.append(f'Expected parent directory: {cwd}/{{namespace}}/{{collection}}/') + elif 'ansible_collections' not in cwd.split(os.path.sep): + blocks.append('No "ansible_collections" parent directory was found.') + + if self.content.collection: + if not is_valid_identifier(self.content.collection.namespace): + blocks.append(f'The namespace "{self.content.collection.namespace}" is an invalid identifier or a reserved keyword.') + + if not is_valid_identifier(self.content.collection.name): + blocks.append(f'The name "{self.content.collection.name}" is an invalid identifier or a reserved keyword.') + + message = '\n'.join(blocks) + + return message + @cache def data_context(): # type: () -> DataContext @@ -173,21 +230,7 @@ def data_context(): # type: () -> DataContext for provider_type in provider_types: import_plugins('provider/%s' % provider_type) - try: - context = DataContext() - except ProviderNotFoundForPath: - options = [ - ' - an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/', - ] - - if ANSIBLE_SOURCE_ROOT: - options.insert(0, ' - the Ansible source: %s/' % ANSIBLE_SOURCE_ROOT) - - raise ApplicationError('''The current working directory must be at or below: - -%s - -Current working directory: %s''' % ('\n'.join(options), os.getcwd())) + context = DataContext() return context diff --git a/test/lib/ansible_test/_internal/delegation.py b/test/lib/ansible_test/_internal/delegation.py index 
a5c404d9bc..b3b8ad51dc 100644 --- a/test/lib/ansible_test/_internal/delegation.py +++ b/test/lib/ansible_test/_internal/delegation.py @@ -7,11 +7,16 @@ import os import tempfile import typing as t +from .constants import ( + STATUS_HOST_CONNECTION_ERROR, +) + from .io import ( make_dirs, ) from .config import ( + CommonConfig, EnvironmentConfig, IntegrationConfig, ShellConfig, @@ -26,6 +31,7 @@ from .util import ( ANSIBLE_BIN_PATH, ANSIBLE_LIB_ROOT, ANSIBLE_TEST_ROOT, + OutputStream, ) from .util_common import ( @@ -35,6 +41,7 @@ from .util_common import ( from .containers import ( support_container_context, + ContainerDatabase, ) from .data import ( @@ -65,9 +72,13 @@ from .provisioning import ( HostState, ) +from .content_config import ( + serialize_content_config, +) + @contextlib.contextmanager -def delegation_context(args, host_state): # type: (EnvironmentConfig, HostState) -> None +def delegation_context(args, host_state): # type: (EnvironmentConfig, HostState) -> t.Iterator[None] """Context manager for serialized host state during delegation.""" make_dirs(ResultType.TMP.path) @@ -78,6 +89,7 @@ def delegation_context(args, host_state): # type: (EnvironmentConfig, HostState with tempfile.TemporaryDirectory(prefix='host-', dir=ResultType.TMP.path) as host_dir: args.host_settings.serialize(os.path.join(host_dir, 'settings.dat')) host_state.serialize(os.path.join(host_dir, 'state.dat')) + serialize_content_config(args, os.path.join(host_dir, 'config.dat')) args.host_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(host_dir)) @@ -87,8 +99,10 @@ def delegation_context(args, host_state): # type: (EnvironmentConfig, HostState args.host_path = None -def delegate(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None +def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None """Delegate execution of ansible-test to another environment.""" 
+ assert isinstance(args, EnvironmentConfig) + with delegation_context(args, host_state): if isinstance(args, TestConfig): args.metadata.ci_provider = get_ci_provider().code @@ -141,7 +155,7 @@ def delegate_command(args, host_state, exclude, require): # type: (EnvironmentC if not args.allow_destructive: options.append('--allow-destructive') - with support_container_context(args, ssh) as containers: + with support_container_context(args, ssh) as containers: # type: t.Optional[ContainerDatabase] if containers: options.extend(['--containers', json.dumps(containers.to_dict())]) @@ -155,19 +169,27 @@ def delegate_command(args, host_state, exclude, require): # type: (EnvironmentC os.path.join(content_root, ResultType.COVERAGE.relative_path), ] - con.run(['mkdir', '-p'] + writable_dirs) - con.run(['chmod', '777'] + writable_dirs) - con.run(['chmod', '755', working_directory]) - con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)]) - con.run(['useradd', pytest_user, '--create-home']) - con.run(insert_options(command, options + ['--requirements-mode', 'only'])) + con.run(['mkdir', '-p'] + writable_dirs, capture=True) + con.run(['chmod', '777'] + writable_dirs, capture=True) + con.run(['chmod', '755', working_directory], capture=True) + con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)], capture=True) + con.run(['useradd', pytest_user, '--create-home'], capture=True) + + con.run(insert_options(command, options + ['--requirements-mode', 'only']), capture=False) container = con.inspect() networks = container.get_network_names() if networks is not None: for network in networks: - con.disconnect_network(network) + try: + con.disconnect_network(network) + except SubprocessError: + display.warning( + 'Unable to disconnect network "%s" (this is normal under podman). ' + 'Tests will not be isolated from the network. Network-related tests may ' + 'misbehave.' 
% (network,) + ) else: display.warning('Network disconnection is not supported (this is normal under podman). ' 'Tests will not be isolated from the network. Network-related tests may misbehave.') @@ -177,14 +199,27 @@ def delegate_command(args, host_state, exclude, require): # type: (EnvironmentC con.user = pytest_user success = False + status = 0 try: - con.run(insert_options(command, options)) + # When delegating, preserve the original separate stdout/stderr streams, but only when the following conditions are met: + # 1) Display output is being sent to stderr. This indicates the output on stdout must be kept separate from stderr. + # 2) The delegation is non-interactive. Interactive mode, which generally uses a TTY, is not compatible with intercepting stdout/stderr. + # The downside to having separate streams is that individual lines of output from each are more likely to appear out-of-order. + output_stream = OutputStream.ORIGINAL if args.display_stderr and not args.interactive else None + con.run(insert_options(command, options), capture=False, interactive=args.interactive, output_stream=output_stream) success = True + except SubprocessError as ex: + status = ex.status + raise finally: if host_delegation: download_results(args, con, content_root, success) + if not success and status == STATUS_HOST_CONNECTION_ERROR: + for target in host_state.target_profiles: + target.on_target_failure() # when the controller is delegated, report failures after delegation fails + def insert_options(command, options): """Insert addition command line options into the given command and return the result.""" diff --git a/test/lib/ansible_test/_internal/dev/__init__.py b/test/lib/ansible_test/_internal/dev/__init__.py new file mode 100644 index 0000000000..e7c9b7d54f --- /dev/null +++ b/test/lib/ansible_test/_internal/dev/__init__.py @@ -0,0 +1,2 @@ +"""Development and testing support code. 
Enabled through the use of `--dev-*` command line options.""" +from __future__ import annotations diff --git a/test/lib/ansible_test/_internal/dev/container_probe.py b/test/lib/ansible_test/_internal/dev/container_probe.py new file mode 100644 index 0000000000..efce383d0f --- /dev/null +++ b/test/lib/ansible_test/_internal/dev/container_probe.py @@ -0,0 +1,216 @@ +"""Diagnostic utilities to probe container cgroup behavior during development and testing (both manual and integration).""" +from __future__ import annotations + +import dataclasses +import enum +import json +import os +import pathlib +import pwd +import typing as t + +from ..io import ( + read_text_file, + write_text_file, +) + +from ..util import ( + display, + ANSIBLE_TEST_TARGET_ROOT, +) + +from ..config import ( + EnvironmentConfig, +) + +from ..docker_util import ( + LOGINUID_NOT_SET, + docker_exec, + get_docker_info, + get_podman_remote, + require_docker, +) + +from ..host_configs import ( + DockerConfig, +) + +from ..cgroup import ( + CGroupEntry, + CGroupPath, + MountEntry, + MountType, +) + + +class CGroupState(enum.Enum): + """The expected state of a cgroup related mount point.""" + HOST = enum.auto() + PRIVATE = enum.auto() + SHADOWED = enum.auto() + + +@dataclasses.dataclass(frozen=True) +class CGroupMount: + """Details on a cgroup mount point that is expected to be present in the container.""" + path: str + type: t.Optional[str] + writable: t.Optional[bool] + state: t.Optional[CGroupState] + + def __post_init__(self): + assert is_relative_to(pathlib.PurePosixPath(self.path), CGroupPath.ROOT) + + if self.type is None: + assert self.state is None + elif self.type == MountType.TMPFS: + assert self.writable is True + assert self.state is None + else: + assert self.type in (MountType.CGROUP_V1, MountType.CGROUP_V2) + assert self.state is not None + + +def check_container_cgroup_status(args: EnvironmentConfig, config: DockerConfig, container_name: str, expected_mounts: tuple[CGroupMount, ...]) -> 
None: + """Check the running container to examine the state of the cgroup hierarchies.""" + cmd = ['sh', '-c', 'cat /proc/1/cgroup && echo && cat /proc/1/mountinfo'] + + stdout = docker_exec(args, container_name, cmd, capture=True)[0] + cgroups_stdout, mounts_stdout = stdout.split('\n\n') + + cgroups = CGroupEntry.loads(cgroups_stdout) + mounts = MountEntry.loads(mounts_stdout) + + mounts = tuple(mount for mount in mounts if is_relative_to(mount.path, CGroupPath.ROOT)) + + mount_cgroups: dict[MountEntry, CGroupEntry] = {} + probe_paths: dict[pathlib.PurePosixPath, t.Optional[str]] = {} + + for cgroup in cgroups: + if cgroup.subsystem: + mount = ([mount for mount in mounts if + mount.type == MountType.CGROUP_V1 and + is_relative_to(mount.path, cgroup.root_path) and + is_relative_to(cgroup.full_path, mount.path) + ] or [None])[-1] + else: + mount = ([mount for mount in mounts if + mount.type == MountType.CGROUP_V2 and + mount.path == cgroup.root_path + ] or [None])[-1] + + if mount: + mount_cgroups[mount] = cgroup + + for mount in mounts: + probe_paths[mount.path] = None + + if (cgroup := mount_cgroups.get(mount)) and cgroup.full_path != mount.path: # child of mount.path + probe_paths[cgroup.full_path] = None + + probe_script = read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'probe_cgroups.py')) + probe_command = [config.python.path, '-', f'{container_name}-probe'] + [str(path) for path in probe_paths] + probe_results = json.loads(docker_exec(args, container_name, probe_command, capture=True, data=probe_script)[0]) + + for path in probe_paths: + probe_paths[path] = probe_results[str(path)] + + remaining_mounts: dict[pathlib.PurePosixPath, MountEntry] = {mount.path: mount for mount in mounts} + results: dict[pathlib.PurePosixPath, tuple[bool, str]] = {} + + for expected_mount in expected_mounts: + expected_path = pathlib.PurePosixPath(expected_mount.path) + + if not (actual_mount := remaining_mounts.pop(expected_path, None)): + results[expected_path] = 
(False, 'not mounted') + continue + + actual_mount_write_error = probe_paths[actual_mount.path] + actual_mount_errors = [] + + if cgroup := mount_cgroups.get(actual_mount): + if expected_mount.state == CGroupState.SHADOWED: + actual_mount_errors.append('unexpected cgroup association') + + if cgroup.root_path == cgroup.full_path and expected_mount.state == CGroupState.HOST: + results[cgroup.root_path.joinpath('???')] = (False, 'missing cgroup') + + if cgroup.full_path == actual_mount.path: + if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE: + actual_mount_errors.append('unexpected mount') + else: + cgroup_write_error = probe_paths[cgroup.full_path] + cgroup_errors = [] + + if expected_mount.state == CGroupState.SHADOWED: + cgroup_errors.append('unexpected cgroup association') + + if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE: + cgroup_errors.append('unexpected cgroup') + + if cgroup_write_error: + cgroup_errors.append(cgroup_write_error) + + if cgroup_errors: + results[cgroup.full_path] = (False, f'directory errors: {", ".join(cgroup_errors)}') + else: + results[cgroup.full_path] = (True, 'directory (writable)') + elif expected_mount.state not in (None, CGroupState.SHADOWED): + actual_mount_errors.append('missing cgroup association') + + if actual_mount.type != expected_mount.type and expected_mount.type is not None: + actual_mount_errors.append(f'type not {expected_mount.type}') + + if bool(actual_mount_write_error) == expected_mount.writable: + actual_mount_errors.append(f'{actual_mount_write_error or "writable"}') + + if actual_mount_errors: + results[actual_mount.path] = (False, f'{actual_mount.type} errors: {", ".join(actual_mount_errors)}') + else: + results[actual_mount.path] = (True, f'{actual_mount.type} ({actual_mount_write_error or "writable"})') + + for remaining_mount in remaining_mounts.values(): + remaining_mount_write_error = probe_paths[remaining_mount.path] + + 
results[remaining_mount.path] = (False, f'unexpected {remaining_mount.type} mount ({remaining_mount_write_error or "writable"})') + + identity = get_identity(args, config, container_name) + messages: list[tuple[pathlib.PurePosixPath, bool, str]] = [(path, result[0], result[1]) for path, result in sorted(results.items())] + message = '\n'.join(f'{"PASS" if result else "FAIL"}: {path} -> {message}' for path, result, message in messages) + + display.info(f'>>> Container: {identity}\n{message.rstrip()}') + + if args.dev_probe_cgroups: + write_text_file(os.path.join(args.dev_probe_cgroups, f'{identity}.log'), message) + + +def get_identity(args: EnvironmentConfig, config: DockerConfig, container_name: str): + """Generate and return an identity string to use when logging test results.""" + engine = require_docker().command + + try: + loginuid = int(read_text_file('/proc/self/loginuid')) + except FileNotFoundError: + loginuid = LOGINUID_NOT_SET + + user = pwd.getpwuid(os.getuid()).pw_name + login_user = user if loginuid == LOGINUID_NOT_SET else pwd.getpwuid(loginuid).pw_name + remote = engine == 'podman' and get_podman_remote() + + tags = ( + config.name, + engine, + f'cgroup={config.cgroup.value}@{get_docker_info(args).cgroup_version}', + f'remote={remote}', + f'user={user}', + f'loginuid={login_user}', + container_name, + ) + + return '|'.join(tags) + + +def is_relative_to(first: pathlib.PurePosixPath, second: t.Union[pathlib.PurePosixPath, str]) -> bool: + """Return True if path `first` is relative to path `second`, otherwise return False.""" + second_path = pathlib.PurePosixPath(second) + return second_path == first or second_path in first.parents diff --git a/test/lib/ansible_test/_internal/docker_util.py b/test/lib/ansible_test/_internal/docker_util.py index da113f02a1..398145b49d 100644 --- a/test/lib/ansible_test/_internal/docker_util.py +++ b/test/lib/ansible_test/_internal/docker_util.py @@ -1,18 +1,17 @@ """Functions for accessing docker via the docker cli.""" 
from __future__ import annotations +import dataclasses +import enum import json import os -import random +import pathlib +import re import socket import time import urllib.parse import typing as t -from .io import ( - read_text_file, -) - from .util import ( ApplicationError, common_environment, @@ -20,6 +19,7 @@ from .util import ( find_executable, SubprocessError, cache, + OutputStream, ) from .util_common import ( @@ -29,7 +29,17 @@ from .util_common import ( from .config import ( CommonConfig, - EnvironmentConfig, +) + +from .thread import ( + mutex, + named_lock, +) + +from .cgroup import ( + CGroupEntry, + MountEntry, + MountType, ) DOCKER_COMMANDS = [ @@ -37,6 +47,379 @@ DOCKER_COMMANDS = [ 'podman', ] +UTILITY_IMAGE = 'quay.io/ansible/ansible-test-utility-container:2.0.0' + +# Max number of open files in a docker container. +# Passed with --ulimit option to the docker run command. +MAX_NUM_OPEN_FILES = 10240 + +# The value of /proc/*/loginuid when it is not set. +# It is a reserved UID, which is the maximum 32-bit unsigned integer value. +# See: https://access.redhat.com/solutions/25404 +LOGINUID_NOT_SET = 4294967295 + + +class DockerInfo: + """The results of `docker info` and `docker version` for the container runtime.""" + + @classmethod + def init(cls, args: CommonConfig) -> DockerInfo: + """Initialize and return a DockerInfo instance.""" + command = require_docker().command + + info_stdout = docker_command(args, ['info', '--format', '{{ json . }}'], capture=True, always=True)[0] + info = json.loads(info_stdout) + + if server_errors := info.get('ServerErrors'): + # This can occur when a remote docker instance is in use and the instance is not responding, such as when the system is still starting up. 
+ # In that case an error such as the following may be returned: + # error during connect: Get "http://{hostname}:2375/v1.24/info": dial tcp {ip_address}:2375: connect: no route to host + raise ApplicationError('Unable to get container host information: ' + '\n'.join(server_errors)) + + version_stdout = docker_command(args, ['version', '--format', '{{ json . }}'], capture=True, always=True)[0] + version = json.loads(version_stdout) + + info = DockerInfo(args, command, info, version) + + return info + + def __init__(self, args: CommonConfig, engine: str, info: dict[str, t.Any], version: dict[str, t.Any]) -> None: + self.args = args + self.engine = engine + self.info = info + self.version = version + + @property + def client(self) -> dict[str, t.Any]: + """The client version details.""" + client = self.version.get('Client') + + if not client: + raise ApplicationError('Unable to get container host client information.') + + return client + + @property + def server(self) -> dict[str, t.Any]: + """The server version details.""" + server = self.version.get('Server') + + if not server: + if self.engine == 'podman': + # Some Podman versions always report server version info (verified with 1.8.0 and 1.9.3). + # Others do not unless Podman remote is being used. + # To provide consistency, use the client version if the server version isn't provided. 
+ # See: https://github.com/containers/podman/issues/2671#issuecomment-804382934 + return self.client + + raise ApplicationError('Unable to get container host server information.') + + return server + + @property + def client_version(self) -> str: + """The client version.""" + return self.client['Version'] + + @property + def server_version(self) -> str: + """The server version.""" + return self.server['Version'] + + @property + def client_major_minor_version(self) -> tuple[int, int]: + """The client major and minor version.""" + major, minor = self.client_version.split('.')[:2] + return int(major), int(minor) + + @property + def server_major_minor_version(self) -> tuple[int, int]: + """The server major and minor version.""" + major, minor = self.server_version.split('.')[:2] + return int(major), int(minor) + + @property + def cgroupns_option_supported(self) -> bool: + """Return True if the `--cgroupns` option is supported, otherwise return False.""" + if self.engine == 'docker': + # Docker added support for the `--cgroupns` option in version 20.10. + # Both the client and server must support the option to use it. + # See: https://docs.docker.com/engine/release-notes/#20100 + return self.client_major_minor_version >= (20, 10) and self.server_major_minor_version >= (20, 10) + + raise NotImplementedError(self.engine) + + @property + def cgroup_version(self) -> int: + """The cgroup version of the container host.""" + info = self.info + host = info.get('host') + + # When the container host reports cgroup v1 it is running either cgroup v1 legacy mode or cgroup v2 hybrid mode. + # When the container host reports cgroup v2 it is running under cgroup v2 unified mode. 
+ # See: https://github.com/containers/podman/blob/8356621249e36ed62fc7f35f12d17db9027ff076/libpod/info_linux.go#L52-L56 + # See: https://github.com/moby/moby/blob/d082bbcc0557ec667faca81b8b33bec380b75dac/daemon/info_unix.go#L24-L27 + + if host: + return int(host['cgroupVersion'].lstrip('v')) # podman + + try: + return int(info['CgroupVersion']) # docker + except KeyError: + pass + + # Docker 20.10 (API version 1.41) added support for cgroup v2. + # Unfortunately the client or server is too old to report the cgroup version. + # If the server is old, we can infer the cgroup version. + # Otherwise, we'll need to fall back to detection. + # See: https://docs.docker.com/engine/release-notes/#20100 + # See: https://docs.docker.com/engine/api/version-history/#v141-api-changes + + if self.server_major_minor_version < (20, 10): + return 1 # old docker server with only cgroup v1 support + + # Tell the user what versions they have and recommend they upgrade the client. + # Downgrading the server should also work, but we won't mention that. + message = ( + f'The Docker client version is {self.client_version}. ' + f'The Docker server version is {self.server_version}. ' + 'Upgrade your Docker client to version 20.10 or later.' + ) + + if detect_host_properties(self.args).cgroup_v2: + # Unfortunately cgroup v2 was detected on the Docker server. + # A newer client is needed to support the `--cgroupns` option for use with cgroup v2. + raise ApplicationError(f'Unsupported Docker client and server combination using cgroup v2. {message}') + + display.warning(f'Detected Docker server cgroup v1 using probing. 
{message}', unique=True) + + return 1 # docker server is using cgroup v1 (or cgroup v2 hybrid) + + @property + def docker_desktop_wsl2(self) -> bool: + """Return True if Docker Desktop integrated with WSL2 is detected, otherwise False.""" + info = self.info + + kernel_version = info.get('KernelVersion') + operating_system = info.get('OperatingSystem') + + dd_wsl2 = kernel_version and kernel_version.endswith('-WSL2') and operating_system == 'Docker Desktop' + + return dd_wsl2 + + @property + def description(self) -> str: + """Describe the container runtime.""" + tags = dict( + client=self.client_version, + server=self.server_version, + cgroup=f'v{self.cgroup_version}', + ) + + labels = [self.engine] + [f'{key}={value}' for key, value in tags.items()] + + if self.docker_desktop_wsl2: + labels.append('DD+WSL2') + + return f'Container runtime: {" ".join(labels)}' + + +@mutex +def get_docker_info(args: CommonConfig) -> DockerInfo: + """Return info for the current container runtime. The results are cached.""" + try: + return get_docker_info.info # type: ignore[attr-defined] + except AttributeError: + pass + + info = DockerInfo.init(args) + + display.info(info.description, verbosity=1) + + get_docker_info.info = info # type: ignore[attr-defined] + + return info + + +class SystemdControlGroupV1Status(enum.Enum): + """The state of the cgroup v1 systemd hierarchy on the container host.""" + SUBSYSTEM_MISSING = 'The systemd cgroup subsystem was not found.' + FILESYSTEM_NOT_MOUNTED = 'The "/sys/fs/cgroup/systemd" filesystem is not mounted.' + MOUNT_TYPE_NOT_CORRECT = 'The "/sys/fs/cgroup/systemd" mount type is not correct.' + VALID = 'The "/sys/fs/cgroup/systemd" mount is valid.' 
+ + +@dataclasses.dataclass(frozen=True) +class ContainerHostProperties: + """Container host properties detected at run time.""" + audit_code: str + max_open_files: int + loginuid: t.Optional[int] + cgroup_v1: SystemdControlGroupV1Status + cgroup_v2: bool + + +@mutex +def detect_host_properties(args: CommonConfig) -> ContainerHostProperties: + """ + Detect and return properties of the container host. + + The information collected is: + + - The errno result from attempting to query the container host's audit status. + - The max number of open files supported by the container host to run containers. + This value may be capped to the maximum value used by ansible-test. + If the value is below the desired limit, a warning is displayed. + - The loginuid used by the container host to run containers, or None if the audit subsystem is unavailable. + - The cgroup subsystems registered with the Linux kernel. + - The mounts visible within a container. + - The status of the systemd cgroup v1 hierarchy. + + This information is collected together to reduce the number of container runs to probe the container host. 
+ """ + try: + return detect_host_properties.properties # type: ignore[attr-defined] + except AttributeError: + pass + + single_line_commands = ( + 'audit-status', + 'cat /proc/sys/fs/nr_open', + 'ulimit -Hn', + '(cat /proc/1/loginuid; echo)', + ) + + multi_line_commands = ( + ' && '.join(single_line_commands), + 'cat /proc/1/cgroup', + 'cat /proc/1/mountinfo', + ) + + options = ['--volume', '/sys/fs/cgroup:/probe:ro'] + cmd = ['sh', '-c', ' && echo "-" && '.join(multi_line_commands)] + + stdout = run_utility_container(args, f'ansible-test-probe-{args.session_name}', cmd, options)[0] + + if args.explain: + return ContainerHostProperties( + audit_code='???', + max_open_files=MAX_NUM_OPEN_FILES, + loginuid=LOGINUID_NOT_SET, + cgroup_v1=SystemdControlGroupV1Status.VALID, + cgroup_v2=False, + ) + + blocks = stdout.split('\n-\n') + + values = blocks[0].split('\n') + + audit_parts = values[0].split(' ', 1) + audit_status = int(audit_parts[0]) + audit_code = audit_parts[1] + + system_limit = int(values[1]) + hard_limit = int(values[2]) + loginuid = int(values[3]) if values[3] else None + + cgroups = CGroupEntry.loads(blocks[1]) + mounts = MountEntry.loads(blocks[2]) + + if hard_limit < MAX_NUM_OPEN_FILES and hard_limit < system_limit and require_docker().command == 'docker': + # Podman will use the highest possible limits, up to its default of 1M. + # See: https://github.com/containers/podman/blob/009afb50b308548eb129bc68e654db6c6ad82e7a/pkg/specgen/generate/oci.go#L39-L58 + # Docker limits are less predictable. They could be the system limit or the user's soft limit. + # If Docker is running as root it should be able to use the system limit. + # When Docker reports a limit below the preferred value and the system limit, attempt to use the preferred value, up to the system limit. 
+ options = ['--ulimit', f'nofile={min(system_limit, MAX_NUM_OPEN_FILES)}'] + cmd = ['sh', '-c', 'ulimit -Hn'] + + try: + stdout = run_utility_container(args, f'ansible-test-ulimit-{args.session_name}', cmd, options)[0] + except SubprocessError as ex: + display.warning(str(ex)) + else: + hard_limit = int(stdout) + + # Check the audit error code from attempting to query the container host's audit status. + # + # The following error codes are known to occur: + # + # EPERM - Operation not permitted + # This occurs when the root user runs a container but lacks the AUDIT_WRITE capability. + # This will cause patched versions of OpenSSH to disconnect after a login succeeds. + # See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch + # + # EBADF - Bad file number + # This occurs when the host doesn't support the audit system (the open_audit call fails). + # This allows SSH logins to succeed despite the failure. + # See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/netlink.c#L204-L209 + # + # ECONNREFUSED - Connection refused + # This occurs when a non-root user runs a container without the AUDIT_WRITE capability. + # When sending an audit message, libaudit ignores this error condition. + # This allows SSH logins to succeed despite the failure. 
+ # See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/deprecated.c#L48-L52 + + subsystems = set(cgroup.subsystem for cgroup in cgroups) + mount_types = {mount.path: mount.type for mount in mounts} + + if 'systemd' not in subsystems: + cgroup_v1 = SystemdControlGroupV1Status.SUBSYSTEM_MISSING + elif not (mount_type := mount_types.get(pathlib.PurePosixPath('/probe/systemd'))): + cgroup_v1 = SystemdControlGroupV1Status.FILESYSTEM_NOT_MOUNTED + elif mount_type != MountType.CGROUP_V1: + cgroup_v1 = SystemdControlGroupV1Status.MOUNT_TYPE_NOT_CORRECT + else: + cgroup_v1 = SystemdControlGroupV1Status.VALID + + cgroup_v2 = mount_types.get(pathlib.PurePosixPath('/probe')) == MountType.CGROUP_V2 + + display.info(f'Container host audit status: {audit_code} ({audit_status})', verbosity=1) + display.info(f'Container host max open files: {hard_limit}', verbosity=1) + display.info(f'Container loginuid: {loginuid if loginuid is not None else "unavailable"}' + f'{" (not set)" if loginuid == LOGINUID_NOT_SET else ""}', verbosity=1) + + if hard_limit < MAX_NUM_OPEN_FILES: + display.warning(f'Unable to set container max open files to {MAX_NUM_OPEN_FILES}. Using container host limit of {hard_limit} instead.') + else: + hard_limit = MAX_NUM_OPEN_FILES + + properties = ContainerHostProperties( + # The errno (audit_status) is intentionally not exposed here, as it can vary across systems and architectures. + # Instead, the symbolic name (audit_code) is used, which is resolved inside the container which generated the error. 
+ # See: https://man7.org/linux/man-pages/man3/errno.3.html + audit_code=audit_code, + max_open_files=hard_limit, + loginuid=loginuid, + cgroup_v1=cgroup_v1, + cgroup_v2=cgroup_v2, + ) + + detect_host_properties.properties = properties # type: ignore[attr-defined] + + return properties + + +def run_utility_container( + args: CommonConfig, + name: str, + cmd: list[str], + options: list[str], + data: t.Optional[str] = None, +) -> tuple[t.Optional[str], t.Optional[str]]: + """Run the specified command using the ansible-test utility container, returning stdout and stderr.""" + options = options + [ + '--name', name, + '--rm', + ] + + if data: + options.append('-i') + + docker_pull(args, UTILITY_IMAGE) + + return docker_run(args, UTILITY_IMAGE, options, cmd, data) + class DockerCommand: """Details about the available docker command.""" @@ -57,7 +440,7 @@ class DockerCommand: executable = find_executable(command, required=False) if executable: - version = raw_command([command, '-v'], capture=True)[0].strip() + version = raw_command([command, '-v'], env=docker_environment(), capture=True)[0].strip() if command == 'docker' and 'podman' in version: continue # avoid detecting podman as docker @@ -118,162 +501,215 @@ def get_docker_hostname(): # type: () -> str @cache -def get_docker_container_id(): # type: () -> t.Optional[str] - """Return the current container ID if running in a container, otherwise return None.""" - path = '/proc/self/cpuset' - container_id = None - - if os.path.exists(path): - # File content varies based on the environment: - # No Container: / - # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507 - # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891 - # Podman: /../../../../../.. 
- contents = read_text_file(path) +def get_podman_host_ip(): # type: () -> str + """Return the IP of the Podman host.""" + podman_host_ip = socket.gethostbyname(get_podman_hostname()) - cgroup_path, cgroup_name = os.path.split(contents.strip()) + display.info('Detected Podman host IP: %s' % podman_host_ip, verbosity=1) - if cgroup_path in ('/docker', '/azpl_job'): - container_id = cgroup_name + return podman_host_ip - if container_id: - display.info('Detected execution in Docker container: %s' % container_id, verbosity=1) - - return container_id +@cache +def get_podman_default_hostname(): # type: () -> t.Optional[str] + """Return the default hostname of the Podman service. -def get_docker_preferred_network_name(args): # type: (EnvironmentConfig) -> str - """ - Return the preferred network name for use with Docker. The selection logic is: - - the network selected by the user with `--docker-network` - - the network of the currently running docker container (if any) - - the default docker network (returns None) + --format was added in podman 3.3.0, this functionality depends on it's availability """ + hostname = None try: - return get_docker_preferred_network_name.network - except AttributeError: - pass - - network = None + stdout = raw_command(['podman', 'system', 'connection', 'list', '--format=json'], env=docker_environment(), capture=True)[0] + except SubprocessError: + stdout = '[]' - if args.docker_network: - network = args.docker_network - else: - current_container_id = get_docker_container_id() + try: + connections = json.loads(stdout) + except json.decoder.JSONDecodeError: + return hostname - if current_container_id: - # Make sure any additional containers we launch use the same network as the current container we're running in. - # This is needed when ansible-test is running in a container that is not connected to Docker's default network. 
- container = docker_inspect(args, current_container_id, always=True) - network = container.get_network_name() + for connection in connections: + # A trailing indicates the default + if connection['Name'][-1] == '*': + hostname = connection['URI'] + break - get_docker_preferred_network_name.network = network + return hostname - return network +@cache +def get_podman_remote(): # type: () -> t.Optional[str] + """Return the remote podman hostname, if any, otherwise return None.""" + # URL value resolution precedence: + # - command line value + # - environment variable CONTAINER_HOST + # - containers.conf + # - unix://run/podman/podman.sock + hostname = None + + podman_host = os.environ.get('CONTAINER_HOST') + if not podman_host: + podman_host = get_podman_default_hostname() + + if podman_host and podman_host.startswith('ssh://'): + try: + hostname = urllib.parse.urlparse(podman_host).hostname + except ValueError: + display.warning('Could not parse podman URI "%s"' % podman_host) + else: + display.info('Detected Podman remote: %s' % hostname, verbosity=1) + return hostname -def is_docker_user_defined_network(network): # type: (str) -> bool - """Return True if the network being used is a user-defined network.""" - return network and network != 'bridge' +@cache +def get_podman_hostname(): # type: () -> str + """Return the hostname of the Podman service.""" + hostname = get_podman_remote() -def docker_pull(args, image): # type: (EnvironmentConfig, str) -> None - """ - Pull the specified image if it is not available. - Images without a tag or digest will not be pulled. - Retries up to 10 times if the pull fails. 
- """ - if '@' not in image and ':' not in image: - display.info('Skipping pull of image without tag or digest: %s' % image, verbosity=2) - return + if not hostname: + hostname = 'localhost' + display.info('Assuming Podman is available on localhost.', verbosity=1) - if docker_image_exists(args, image): - display.info('Skipping pull of existing image: %s' % image, verbosity=2) - return + return hostname - for _iteration in range(1, 10): - try: - docker_command(args, ['pull', image]) - return - except SubprocessError: - display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image) - time.sleep(3) - raise ApplicationError('Failed to pull docker image "%s".' % image) +@cache +def get_docker_container_id(): # type: () -> t.Optional[str] + """Return the current container ID if running in a container, otherwise return None.""" + mountinfo_path = pathlib.Path('/proc/self/mountinfo') + container_id = None + engine = None + + if mountinfo_path.is_file(): + # NOTE: This method of detecting the container engine and container ID relies on implementation details of each container engine. + # Although the implementation details have remained unchanged for some time, there is no guarantee they will continue to work. + # There have been proposals to create a standard mechanism for this, but none is currently available. + # See: https://github.com/opencontainers/runtime-spec/issues/1105 + + mounts = MountEntry.loads(mountinfo_path.read_text()) + + for mount in mounts: + if str(mount.path) == '/etc/hostname': + # Podman generates /etc/hostname in the makePlatformBindMounts function. + # That function ends up using ContainerRunDirectory to generate a path like: {prefix}/{container_id}/userdata/hostname + # NOTE: The {prefix} portion of the path can vary, so should not be relied upon. 
+ # See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/libpod/container_internal_linux.go#L660-L664 + # See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/vendor/github.com/containers/storage/store.go#L3133 + # This behavior has existed for ~5 years and was present in Podman version 0.2. + # See: https://github.com/containers/podman/pull/248 + if match := re.search('/(?P<id>[0-9a-f]{64})/userdata/hostname$', str(mount.root)): + container_id = match.group('id') + engine = 'Podman' + break + + # Docker generates /etc/hostname in the BuildHostnameFile function. + # That function ends up using the containerRoot function to generate a path like: {prefix}/{container_id}/hostname + # NOTE: The {prefix} portion of the path can vary, so should not be relied upon. + # See: https://github.com/moby/moby/blob/cd8a090e6755bee0bdd54ac8a894b15881787097/container/container_unix.go#L58 + # See: https://github.com/moby/moby/blob/92e954a2f05998dc05773b6c64bbe23b188cb3a0/daemon/container.go#L86 + # This behavior has existed for at least ~7 years and was present in Docker version 1.0.1. 
+ # See: https://github.com/moby/moby/blob/v1.0.1/daemon/container.go#L351 + # See: https://github.com/moby/moby/blob/v1.0.1/daemon/daemon.go#L133 + if match := re.search('/(?P<id>[0-9a-f]{64})/hostname$', str(mount.root)): + container_id = match.group('id') + engine = 'Docker' + break + if container_id: + display.info(f'Detected execution in {engine} container ID: {container_id}', verbosity=1) -def docker_cp_to(args, container_id, src, dst): # type: (EnvironmentConfig, str, str, str) -> None - """Copy a file to the specified container.""" - docker_command(args, ['cp', src, '%s:%s' % (container_id, dst)]) + return container_id -def docker_run( - args, # type: EnvironmentConfig - image, # type: str - options, # type: t.Optional[t.List[str]] - cmd=None, # type: t.Optional[t.List[str]] - create_only=False, # type: bool -): # type: (...) -> str - """Run a container using the given docker image.""" - if not options: - options = [] +def docker_pull(args, image): # type: (CommonConfig, str) -> None + """ + Pull the specified image if it is not available. + Images without a tag or digest will not be pulled. + Retries up to 10 times if the pull fails. + A warning will be shown for any image with volumes defined. + Images will be pulled only once. + Concurrent pulls for the same image will block until the first completes. + """ + with named_lock(f'docker_pull:{image}') as first: + if first: + __docker_pull(args, image) - if not cmd: - cmd = [] - if create_only: - command = 'create' +def __docker_pull(args: CommonConfig, image: str) -> None: + """Internal implementation for docker_pull. 
Do not call directly.""" + if '@' not in image and ':' not in image: + display.info('Skipping pull of image without tag or digest: %s' % image, verbosity=2) + inspect = docker_image_inspect(args, image) + elif inspect := docker_image_inspect(args, image, always=True): + display.info('Skipping pull of existing image: %s' % image, verbosity=2) else: - command = 'run' + for _iteration in range(1, 10): + try: + docker_command(args, ['pull', image], capture=False) - network = get_docker_preferred_network_name(args) + if (inspect := docker_image_inspect(args, image)) or args.explain: + break - if is_docker_user_defined_network(network): - # Only when the network is not the default bridge network. - options.extend(['--network', network]) + display.warning(f'Image "{image}" not found after pull completed. Waiting a few seconds before trying again.') + except SubprocessError: + display.warning(f'Failed to pull container image "{image}". Waiting a few seconds before trying again.') + time.sleep(3) + else: + raise ApplicationError(f'Failed to pull container image "{image}".') - for _iteration in range(1, 3): - try: - stdout = docker_command(args, [command] + options + [image] + cmd, capture=True)[0] + if inspect and inspect.volumes: + display.warning(f'Image "{image}" contains {len(inspect.volumes)} volume(s): {", ".join(sorted(inspect.volumes))}\n' + 'This may result in leaking anonymous volumes. It may also prevent the image from working on some hosts or container engines.\n' + 'The image should be rebuilt without the use of the VOLUME instruction.', + unique=True) - if args.explain: - return ''.join(random.choice('0123456789abcdef') for _iteration in range(64)) - return stdout.strip() - except SubprocessError as ex: - display.error(ex) - display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' 
% image) - time.sleep(3) +def docker_cp_to(args, container_id, src, dst): # type: (CommonConfig, str, str, str) -> None + """Copy a file to the specified container.""" + docker_command(args, ['cp', src, '%s:%s' % (container_id, dst)], capture=True) - raise ApplicationError('Failed to run docker image "%s".' % image) +def docker_create( + args: CommonConfig, + image: str, + options: list[str], + cmd: list[str] = None, +) -> tuple[t.Optional[str], t.Optional[str]]: + """Create a container using the given docker image.""" + return docker_command(args, ['create'] + options + [image] + cmd, capture=True) -def docker_start(args, container_id, options=None): # type: (EnvironmentConfig, str, t.Optional[t.List[str]]) -> (t.Optional[str], t.Optional[str]) - """ - Start a docker container by name or ID - """ - if not options: - options = [] - for _iteration in range(1, 3): - try: - return docker_command(args, ['start'] + options + [container_id], capture=True) - except SubprocessError as ex: - display.error(ex) - display.warning('Failed to start docker container "%s". Waiting a few seconds before trying again.' % container_id) - time.sleep(3) +def docker_run( + args: CommonConfig, + image: str, + options: list[str], + cmd: list[str] = None, + data: t.Optional[str] = None, +) -> tuple[t.Optional[str], t.Optional[str]]: + """Run a container using the given docker image.""" + return docker_command(args, ['run'] + options + [image] + cmd, data=data, capture=True) + - raise ApplicationError('Failed to run docker container "%s".' 
% container_id) +def docker_start( + args: CommonConfig, + container_id: str, + options: list[str], +) -> tuple[t.Optional[str], t.Optional[str]]: + """Start a container by name or ID.""" + return docker_command(args, ['start'] + options + [container_id], capture=True) -def docker_rm(args, container_id): # type: (EnvironmentConfig, str) -> None +def docker_rm(args, container_id): # type: (CommonConfig, str) -> None """Remove the specified container.""" try: - docker_command(args, ['rm', '-f', container_id], capture=True) + # Stop the container with SIGKILL immediately, then remove the container. + # Podman supports the `--time` option on `rm`, but only since version 4.0.0. + # Docker does not support the `--time` option on `rm`. + docker_command(args, ['stop', '--time', '0', container_id], capture=True) + docker_command(args, ['rm', container_id], capture=True) except SubprocessError as ex: - if 'no such container' in ex.stderr: - pass # podman does not handle this gracefully, exits 1 - else: + # Both Podman and Docker report an error if the container does not exist. + # The error messages contain the same "no such container" string, differing only in capitalization. 
+ if 'no such container' not in ex.stderr.lower(): raise ex @@ -291,7 +727,7 @@ class ContainerNotFoundError(DockerError): class DockerInspect: """The results of `docker inspect` for a single container.""" - def __init__(self, args, inspection): # type: (EnvironmentConfig, t.Dict[str, t.Any]) -> None + def __init__(self, args, inspection): # type: (CommonConfig, t.Dict[str, t.Any]) -> None self.args = args self.inspection = inspection @@ -335,6 +771,14 @@ class DockerInspect: return self.state['Running'] @property + def pid(self) -> int: + """Return the PID of the init process.""" + if self.args.explain: + return 0 + + return self.state['Pid'] + + @property def env(self): # type: () -> t.List[str] """Return a list of the environment variables used to create the container.""" return self.config['Env'] @@ -373,33 +817,14 @@ class DockerInspect: return networks[0] - def get_ip_address(self): # type: () -> t.Optional[str] - """Return the IP address of the container for the preferred docker network.""" - if self.networks: - network_name = get_docker_preferred_network_name(self.args) - - if not network_name: - # Sort networks and use the first available. - # This assumes all containers will have access to the same networks. - network_name = sorted(self.networks.keys()).pop(0) - - ipaddress = self.networks[network_name]['IPAddress'] - else: - ipaddress = self.network_settings['IPAddress'] - - if not ipaddress: - return None - - return ipaddress - -def docker_inspect(args, identifier, always=False): # type: (EnvironmentConfig, str, bool) -> DockerInspect +def docker_inspect(args, identifier, always=False): # type: (CommonConfig, str, bool) -> DockerInspect """ - Return the results of `docker inspect` for the specified container. + Return the results of `docker container inspect` for the specified container. Raises a ContainerNotFoundError if the container was not found. 
""" try: - stdout = docker_command(args, ['inspect', identifier], capture=True, always=always)[0] + stdout = docker_command(args, ['container', 'inspect', identifier], capture=True, always=always)[0] except SubprocessError as ex: stdout = ex.stdout @@ -414,29 +839,118 @@ def docker_inspect(args, identifier, always=False): # type: (EnvironmentConfig, raise ContainerNotFoundError(identifier) -def docker_network_disconnect(args, container_id, network): # type: (EnvironmentConfig, str, str) -> None +def docker_network_disconnect(args, container_id, network): # type: (CommonConfig, str, str) -> None """Disconnect the specified docker container from the given network.""" docker_command(args, ['network', 'disconnect', network, container_id], capture=True) -def docker_image_exists(args, image): # type: (EnvironmentConfig, str) -> bool - """Return True if the image exists, otherwise False.""" +class DockerImageInspect: + """The results of `docker image inspect` for a single image.""" + def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None: + self.args = args + self.inspection = inspection + + # primary properties + + @property + def config(self) -> dict[str, t.Any]: + """Return a dictionary of the image config.""" + return self.inspection['Config'] + + # nested properties + + @property + def volumes(self) -> dict[str, t.Any]: + """Return a dictionary of the image volumes.""" + return self.config.get('Volumes') or {} + + @property + def cmd(self) -> list[str]: + """The command to run when the container starts.""" + return self.config['Cmd'] + + +@mutex +def docker_image_inspect(args: CommonConfig, image: str, always: bool = False) -> t.Optional[DockerImageInspect]: + """ + Return the results of `docker image inspect` for the specified image or None if the image does not exist. 
+ """ + inspect_cache: dict[str, DockerImageInspect] + + try: + inspect_cache = docker_image_inspect.cache # type: ignore[attr-defined] + except AttributeError: + inspect_cache = docker_image_inspect.cache = {} # type: ignore[attr-defined] + + if inspect_result := inspect_cache.get(image): + return inspect_result + try: - docker_command(args, ['image', 'inspect', image], capture=True) + stdout = docker_command(args, ['image', 'inspect', image], capture=True, always=always)[0] except SubprocessError: - return False + stdout = '[]' + + if args.explain and not always: + items = [] + else: + items = json.loads(stdout) - return True + if len(items) > 1: + raise ApplicationError(f'Inspection of image "{image}" resulted in {len(items)} items:\n{json.dumps(items, indent=4)}') + + if len(items) == 1: + inspect_result = DockerImageInspect(args, items[0]) + inspect_cache[image] = inspect_result + return inspect_result + + return None + + +class DockerNetworkInspect: + """The results of `docker network inspect` for a single network.""" + def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None: + self.args = args + self.inspection = inspection + + +def docker_network_inspect(args: CommonConfig, network: str, always: bool = False) -> t.Optional[DockerNetworkInspect]: + """ + Return the results of `docker network inspect` for the specified network or None if the network does not exist. + """ + try: + stdout = docker_command(args, ['network', 'inspect', network], capture=True, always=always)[0] + except SubprocessError: + stdout = '[]' + + if args.explain and not always: + items = [] + else: + items = json.loads(stdout) + + if len(items) == 1: + return DockerNetworkInspect(args, items[0]) + + return None + + +def docker_logs(args: CommonConfig, container_id: str) -> None: + """Display logs for the specified container. 
If an error occurs, it is displayed rather than raising an exception.""" + try: + docker_command(args, ['logs', container_id], capture=False) + except SubprocessError as ex: + display.error(str(ex)) def docker_exec( - args, # type: EnvironmentConfig + args, # type: CommonConfig container_id, # type: str cmd, # type: t.List[str] + capture, # type: bool options=None, # type: t.Optional[t.List[str]] - capture=False, # type: bool - stdin=None, # type: t.Optional[t.BinaryIO] - stdout=None, # type: t.Optional[t.BinaryIO] + stdin=None, # type: t.Optional[t.IO[bytes]] + stdout=None, # type: t.Optional[t.IO[bytes]] + interactive=False, # type: bool + output_stream=None, # type: t.Optional[OutputStream] data=None, # type: t.Optional[str] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Execute the given command in the specified container.""" @@ -446,38 +960,45 @@ def docker_exec( if data or stdin or stdout: options.append('-i') - return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout, data=data) - - -def docker_info(args): # type: (CommonConfig) -> t.Dict[str, t.Any] - """Return a dictionary containing details from the `docker info` command.""" - stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True) - return json.loads(stdout) - - -def docker_version(args): # type: (CommonConfig) -> t.Dict[str, t.Any] - """Return a dictionary containing details from the `docker version` command.""" - stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True) - return json.loads(stdout) + return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout, interactive=interactive, + output_stream=output_stream, data=data) def docker_command( args, # type: CommonConfig cmd, # type: t.List[str] - capture=False, # type: bool - stdin=None, # type: t.Optional[t.BinaryIO] - stdout=None, 
# type: t.Optional[t.BinaryIO] + capture, # type: bool + stdin=None, # type: t.Optional[t.IO[bytes]] + stdout=None, # type: t.Optional[t.IO[bytes]] + interactive=False, # type: bool + output_stream=None, # type: t.Optional[OutputStream] always=False, # type: bool data=None, # type: t.Optional[str] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified docker command.""" env = docker_environment() - command = require_docker().command - return run_command(args, [command] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always, data=data) + command = [require_docker().command] + + if command[0] == 'podman' and get_podman_remote(): + command.append('--remote') + + return run_command(args, command + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, interactive=interactive, always=always, + output_stream=output_stream, data=data) def docker_environment(): # type: () -> t.Dict[str, str] """Return a dictionary of docker related environment variables found in the current environment.""" env = common_environment() - env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_'))) + + var_names = { + 'XDG_RUNTIME_DIR', # podman + } + + var_prefixes = { + 'CONTAINER_', # podman remote + 'DOCKER_', # docker + } + + env.update({name: value for name, value in os.environ.items() if name in var_names or any(name.startswith(prefix) for prefix in var_prefixes)}) + return env diff --git a/test/lib/ansible_test/_internal/host_configs.py b/test/lib/ansible_test/_internal/host_configs.py index 41fb7a89b9..e69e706542 100644 --- a/test/lib/ansible_test/_internal/host_configs.py +++ b/test/lib/ansible_test/_internal/host_configs.py @@ -18,6 +18,8 @@ from .io import ( ) from .completion import ( + AuditMode, + CGroupVersion, CompletionConfig, docker_completion, DockerCompletionConfig, @@ -39,6 +41,7 @@ from .util import ( get_available_python_versions, str_to_version, version_to_str, + Architecture, ) @@ 
-183,8 +186,10 @@ class PosixConfig(HostConfig, metaclass=abc.ABCMeta): def get_defaults(self, context): # type: (HostContext) -> PosixCompletionConfig """Return the default settings.""" - def apply_defaults(self, context, defaults): # type: (HostContext, PosixCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, PosixCompletionConfig) + super().apply_defaults(context, defaults) self.python = self.python or NativePythonConfig() @@ -204,25 +209,29 @@ class RemoteConfig(HostConfig, metaclass=abc.ABCMeta): """Base class for remote host configuration.""" name: t.Optional[str] = None provider: t.Optional[str] = None + arch: t.Optional[str] = None @property - def platform(self): + def platform(self): # type: () -> str """The name of the platform.""" return self.name.partition('/')[0] @property - def version(self): + def version(self): # type: () -> str """The version of the platform.""" return self.name.partition('/')[2] - def apply_defaults(self, context, defaults): # type: (HostContext, RemoteCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, RemoteCompletionConfig) + super().apply_defaults(context, defaults) if self.provider == 'default': self.provider = None self.provider = self.provider or defaults.provider or 'aws' + self.arch = self.arch or defaults.arch or Architecture.X86_64 @property def is_managed(self): # type: () -> bool @@ -262,8 +271,9 @@ class InventoryConfig(HostConfig): """Return the default settings.""" return InventoryCompletionConfig() - def apply_defaults(self, context, defaults): # type: (HostContext, InventoryCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, 
InventoryCompletionConfig) @dataclasses.dataclass @@ -274,6 +284,8 @@ class DockerConfig(ControllerHostConfig, PosixConfig): memory: t.Optional[int] = None privileged: t.Optional[bool] = None seccomp: t.Optional[str] = None + cgroup: t.Optional[CGroupVersion] = None + audit: t.Optional[AuditMode] = None def get_defaults(self, context): # type: (HostContext) -> DockerCompletionConfig """Return the default settings.""" @@ -293,8 +305,10 @@ class DockerConfig(ControllerHostConfig, PosixConfig): return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()] - def apply_defaults(self, context, defaults): # type: (HostContext, DockerCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, DockerCompletionConfig) + super().apply_defaults(context, defaults) self.name = defaults.name @@ -303,6 +317,12 @@ class DockerConfig(ControllerHostConfig, PosixConfig): if self.seccomp is None: self.seccomp = defaults.seccomp + if self.cgroup is None: + self.cgroup = defaults.cgroup_enum + + if self.audit is None: + self.audit = defaults.audit_enum + if self.privileged is None: self.privileged = False @@ -323,7 +343,7 @@ class DockerConfig(ControllerHostConfig, PosixConfig): @dataclasses.dataclass class PosixRemoteConfig(RemoteConfig, ControllerHostConfig, PosixConfig): """Configuration for a POSIX remote host.""" - arch: t.Optional[str] = None + become: t.Optional[str] = None def get_defaults(self, context): # type: (HostContext) -> PosixRemoteCompletionConfig """Return the default settings.""" @@ -342,6 +362,14 @@ class PosixRemoteConfig(RemoteConfig, ControllerHostConfig, PosixConfig): return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()] + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None + """Apply 
default settings.""" + assert isinstance(defaults, PosixRemoteCompletionConfig) + + super().apply_defaults(context, defaults) + + self.become = self.become or defaults.become + @property def have_root(self): # type: () -> bool """True if root is available, otherwise False.""" @@ -358,9 +386,7 @@ class WindowsRemoteConfig(RemoteConfig, WindowsConfig): """Configuration for a remoe Windows host.""" def get_defaults(self, context): # type: (HostContext) -> WindowsRemoteCompletionConfig """Return the default settings.""" - return filter_completion(windows_completion()).get(self.name) or WindowsRemoteCompletionConfig( - name=self.name, - ) + return filter_completion(windows_completion()).get(self.name) or windows_completion().get(self.platform) @dataclasses.dataclass @@ -383,10 +409,13 @@ class NetworkRemoteConfig(RemoteConfig, NetworkConfig): """Return the default settings.""" return filter_completion(network_completion()).get(self.name) or NetworkRemoteCompletionConfig( name=self.name, + placeholder=True, ) - def apply_defaults(self, context, defaults): # type: (HostContext, NetworkRemoteCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, NetworkRemoteCompletionConfig) + super().apply_defaults(context, defaults) self.collection = self.collection or defaults.collection @@ -424,8 +453,10 @@ class ControllerConfig(PosixConfig): """Return the default settings.""" return context.controller_config.get_defaults(context) - def apply_defaults(self, context, defaults): # type: (HostContext, PosixCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, PosixCompletionConfig) + self.controller = context.controller_config if not self.python and not defaults.supported_pythons: @@ -449,7 +480,7 @@ class ControllerConfig(PosixConfig): 
class FallbackReason(enum.Enum): - """Reason fallback was peformed.""" + """Reason fallback was performed.""" ENVIRONMENT = enum.auto() PYTHON = enum.auto() diff --git a/test/lib/ansible_test/_internal/host_profiles.py b/test/lib/ansible_test/_internal/host_profiles.py index e3aeeeebbc..7ff919367c 100644 --- a/test/lib/ansible_test/_internal/host_profiles.py +++ b/test/lib/ansible_test/_internal/host_profiles.py @@ -4,11 +4,13 @@ from __future__ import annotations import abc import dataclasses import os +import shlex import tempfile import time import typing as t from .io import ( + read_text_file, write_text_file, ) @@ -40,6 +42,7 @@ from .host_configs import ( from .core_ci import ( AnsibleCoreCI, SshKey, + VmResource, ) from .util import ( @@ -50,16 +53,30 @@ from .util import ( get_type_map, sanitize_host_name, sorted_versions, + InternalError, + HostConnectionError, + ANSIBLE_TEST_TARGET_ROOT, ) from .util_common import ( + get_docs_url, intercept_python, ) from .docker_util import ( docker_exec, + docker_image_inspect, + docker_logs, + docker_pull, docker_rm, get_docker_hostname, + require_docker, + get_docker_info, + detect_host_properties, + run_utility_container, + SystemdControlGroupV1Status, + LOGINUID_NOT_SET, + UTILITY_IMAGE, ) from .bootstrap import ( @@ -96,24 +113,79 @@ from .connections import ( ) from .become import ( - Su, + Become, + SUPPORTED_BECOME_METHODS, Sudo, ) +from .completion import ( + AuditMode, + CGroupVersion, +) + +from .dev.container_probe import ( + CGroupMount, + CGroupPath, + CGroupState, + MountType, + check_container_cgroup_status, +) + TControllerHostConfig = t.TypeVar('TControllerHostConfig', bound=ControllerHostConfig) THostConfig = t.TypeVar('THostConfig', bound=HostConfig) TPosixConfig = t.TypeVar('TPosixConfig', bound=PosixConfig) TRemoteConfig = t.TypeVar('TRemoteConfig', bound=RemoteConfig) +class ControlGroupError(ApplicationError): + """Raised when the container host does not have the necessary cgroup support to run 
a container.""" + def __init__(self, args: CommonConfig, reason: str) -> None: + engine = require_docker().command + dd_wsl2 = get_docker_info(args).docker_desktop_wsl2 + + message = f''' +{reason} + +Run the following commands as root on the container host to resolve this issue: + + mkdir /sys/fs/cgroup/systemd + mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr + chown -R {{user}}:{{group}} /sys/fs/cgroup/systemd # only when rootless + +NOTE: These changes must be applied each time the container host is rebooted. +'''.strip() + + podman_message = ''' + If rootless Podman is already running [1], you may need to stop it before + containers are able to use the new mount point. + +[1] Check for 'podman' and 'catatonit' processes. +''' + + dd_wsl_message = f''' + When using Docker Desktop with WSL2, additional configuration [1] is required. + +[1] {get_docs_url("https://docs.ansible.com/ansible-core/devel/dev_guide/testing_running_locally.html#docker-desktop-with-wsl2")} +''' + + if engine == 'podman': + message += podman_message + elif dd_wsl2: + message += dd_wsl_message + + message = message.strip() + + super().__init__(message) + + @dataclasses.dataclass(frozen=True) class Inventory: """Simple representation of an Ansible inventory.""" - host_groups: t.Dict[str, t.Dict[str, t.Dict[str, str]]] + host_groups: t.Dict[str, t.Dict[str, t.Dict[str, t.Union[str, int]]]] extra_groups: t.Optional[t.Dict[str, t.List[str]]] = None @staticmethod - def create_single_host(name, variables): # type: (str, t.Dict[str, str]) -> Inventory + def create_single_host(name, variables): # type: (str, t.Dict[str, t.Union[str, int]]) -> Inventory """Return an inventory instance created from the given hostname and variables.""" return Inventory(host_groups=dict(all={name: variables})) @@ -147,7 +219,7 @@ class Inventory: inventory_text = inventory_text.strip() if not args.explain: - write_text_file(path, inventory_text) + write_text_file(path, inventory_text + '\n') 
display.info(f'>>> Inventory\n{inventory_text}', verbosity=3) @@ -176,6 +248,9 @@ class HostProfile(t.Generic[THostConfig], metaclass=abc.ABCMeta): def setup(self): # type: () -> None """Perform out-of-band setup before delegation.""" + def on_target_failure(self) -> None: + """Executed during failure handling if this profile is a target.""" + def deprovision(self): # type: () -> None """Deprovision the host after delegation has completed.""" @@ -294,12 +369,18 @@ class RemoteProfile(SshTargetHostProfile[TRemoteConfig], metaclass=abc.ABCMeta): def create_core_ci(self, load): # type: (bool) -> AnsibleCoreCI """Create and return an AnsibleCoreCI instance.""" + if not self.config.arch: + raise InternalError(f'No arch specified for config: {self.config}') + return AnsibleCoreCI( args=self.args, - platform=self.config.platform, - version=self.config.version, - provider=self.config.provider, - suffix='controller' if self.controller else 'target', + resource=VmResource( + platform=self.config.platform, + version=self.config.version, + architecture=self.config.arch, + provider=self.config.provider, + tag='controller' if self.controller else 'target', + ), load=load, ) @@ -322,6 +403,17 @@ class ControllerProfile(SshTargetHostProfile[ControllerConfig], PosixProfile[Con class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[DockerConfig]): """Host profile for a docker instance.""" + + MARKER = 'ansible-test-marker' + + @dataclasses.dataclass(frozen=True) + class InitConfig: + """Configuration details required to run the container init.""" + options: list[str] + command: str + command_privileged: bool + expected_mounts: tuple[CGroupMount, ...] 
+ @property def container_name(self): # type: () -> t.Optional[str] """Return the stored container name, if any, otherwise None.""" @@ -332,24 +424,519 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do """Store the given container name.""" self.state['container_name'] = value + @property + def cgroup_path(self) -> t.Optional[str]: + """Return the path to the cgroup v1 systemd hierarchy, if any, otherwise None.""" + return self.state.get('cgroup_path') + + @cgroup_path.setter + def cgroup_path(self, value: str) -> None: + """Store the path to the cgroup v1 systemd hierarchy.""" + self.state['cgroup_path'] = value + + @property + def label(self) -> str: + """Label to apply to resources related to this profile.""" + return f'{"controller" if self.controller else "target"}-{self.args.session_name}' + def provision(self): # type: () -> None """Provision the host before delegation.""" + init_probe = self.args.dev_probe_cgroups is not None + init_config = self.get_init_config() + container = run_support_container( args=self.args, context='__test_hosts__', image=self.config.image, - name=f'ansible-test-{"controller" if self.controller else "target"}-{self.args.session_name}', + name=f'ansible-test-{self.label}', ports=[22], publish_ports=not self.controller, # connections to the controller over SSH are not required - options=self.get_docker_run_options(), + options=init_config.options, cleanup=CleanupMode.NO, + cmd=self.build_init_command(init_config, init_probe), ) if not container: + if self.args.prime_containers: + if init_config.command_privileged or init_probe: + docker_pull(self.args, UTILITY_IMAGE) + return self.container_name = container.name + try: + options = ['--pid', 'host', '--privileged'] + + if init_config.command and init_config.command_privileged: + init_command = init_config.command + + if not init_probe: + init_command += f' && {shlex.join(self.wake_command)}' + + cmd = ['nsenter', '-t', 
str(container.details.container.pid), '-m', '-p', 'sh', '-c', init_command] + run_utility_container(self.args, f'ansible-test-init-{self.label}', cmd, options) + + if init_probe: + check_container_cgroup_status(self.args, self.config, self.container_name, init_config.expected_mounts) + + cmd = ['nsenter', '-t', str(container.details.container.pid), '-m', '-p'] + self.wake_command + run_utility_container(self.args, f'ansible-test-wake-{self.label}', cmd, options) + except SubprocessError: + display.info(f'Checking container "{self.container_name}" logs...') + docker_logs(self.args, self.container_name) + + raise + + def get_init_config(self) -> InitConfig: + """Return init config for running under the current container engine.""" + self.check_cgroup_requirements() + + engine = require_docker().command + init_config = getattr(self, f'get_{engine}_init_config')() + + return init_config + + def get_podman_init_config(self) -> InitConfig: + """Return init config for running under Podman.""" + options = self.get_common_run_options() + command: t.Optional[str] = None + command_privileged = False + expected_mounts: tuple[CGroupMount, ...] + + cgroup_version = get_docker_info(self.args).cgroup_version + + # Without AUDIT_WRITE the following errors may appear in the system logs of a container after attempting to log in using SSH: + # + # fatal: linux_audit_write_entry failed: Operation not permitted + # + # This occurs when running containers as root when the container host provides audit support, but the user lacks the AUDIT_WRITE capability. + # The AUDIT_WRITE capability is provided by docker by default, but not podman. + # See: https://github.com/moby/moby/pull/7179 + # + # OpenSSH Portable requires AUDIT_WRITE when logging in with a TTY if the Linux audit feature was compiled in. + # Containers with the feature enabled will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system. 
+ # See: https://github.com/openssh/openssh-portable/blob/2dc328023f60212cd29504fc05d849133ae47355/audit-linux.c#L90 + # See: https://github.com/openssh/openssh-portable/blob/715c892f0a5295b391ae92c26ef4d6a86ea96e8e/loginrec.c#L476-L478 + # + # Some containers will be running a patched version of OpenSSH which blocks logins when EPERM is received while using the audit system. + # These containers will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system. + # See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch + # + # Since only some containers carry the patch or enable the Linux audit feature in OpenSSH, this capability is enabled on a per-container basis. + # No warning is provided when adding this capability, since there's not really anything the user can do about it. + if self.config.audit == AuditMode.REQUIRED and detect_host_properties(self.args).audit_code == 'EPERM': + options.extend(('--cap-add', 'AUDIT_WRITE')) + + # Without AUDIT_CONTROL the following errors may appear in the system logs of a container after attempting to log in using SSH: + # + # pam_loginuid(sshd:session): Error writing /proc/self/loginuid: Operation not permitted + # pam_loginuid(sshd:session): set_loginuid failed + # + # Containers configured to use the pam_loginuid module will encounter this error. If the module is required, logins will fail. + # Since most containers will have this configuration, the code to handle this issue is applied to all containers. + # + # This occurs when the loginuid is set on the container host and doesn't match the user on the container host which is running the container. + # Container hosts which do not use systemd are likely to leave the loginuid unset and thus be unaffected. + # The most common source of a mismatch is the use of sudo to run ansible-test, which changes the uid but cannot change the loginuid. 
+ # This condition typically occurs only under podman, since the loginuid is inherited from the current user. + # See: https://github.com/containers/podman/issues/13012#issuecomment-1034049725 + # + # This condition is detected by querying the loginuid of a container running on the container host. + # When it occurs, a warning is displayed and the AUDIT_CONTROL capability is added to containers to work around the issue. + # The warning serves as notice to the user that their usage of ansible-test is responsible for the additional capability requirement. + if (loginuid := detect_host_properties(self.args).loginuid) not in (0, LOGINUID_NOT_SET, None): + display.warning(f'Running containers with capability AUDIT_CONTROL since the container loginuid ({loginuid}) is incorrect. ' + 'This is most likely due to use of sudo to run ansible-test when loginuid is already set.', unique=True) + + options.extend(('--cap-add', 'AUDIT_CONTROL')) + + if self.config.cgroup == CGroupVersion.NONE: + # Containers which do not require cgroup do not use systemd. + + options.extend(( + # Disabling systemd support in Podman will allow these containers to work on hosts without systemd. + # Without this, running a container on a host without systemd results in errors such as (from crun): + # Error: crun: error stat'ing file `/sys/fs/cgroup/systemd`: No such file or directory: + # A similar error occurs when using runc: + # OCI runtime attempted to invoke a command that was not found + '--systemd', 'false', + # A private cgroup namespace limits what is visible in /proc/*/cgroup. + '--cgroupns', 'private', + # Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Podman. + # This helps provide a consistent container environment across various container host configurations. 
+ '--tmpfs', '/sys/fs/cgroup', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + ) + elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1: + # Podman hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-write in the container. + # They will also create a dedicated cgroup v1 systemd hierarchy for the container. + # On hosts with systemd this path is: /sys/fs/cgroup/systemd/libpod_parent/libpod-{container_id}/ + # On hosts without systemd this path is: /sys/fs/cgroup/systemd/{container_id}/ + + options.extend(( + # Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics). + '--systemd', 'always', + # The host namespace must be used to permit the container to access the cgroup v1 systemd hierarchy created by Podman. + '--cgroupns', 'host', + # Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container. + # Podman will provide a cgroup v1 systemd hierarchy on top of this. + '--tmpfs', '/sys/fs/cgroup', + )) + + self.check_systemd_cgroup_v1(options) # podman + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + # The mount point can be writable or not. + # The reason for the variation is not known. + CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=None, state=CGroupState.HOST), + # The filesystem type can be tmpfs or devtmpfs. + # The reason for the variation is not known. + CGroupMount(path=CGroupPath.SYSTEMD_RELEASE_AGENT, type=None, writable=False, state=None), + ) + elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2: + # Podman hosts providing cgroup v2 will give each container a read-write cgroup mount.
+ + options.extend(( + # Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics). + '--systemd', 'always', + # A private cgroup namespace is used to avoid exposing the host cgroup to the container. + '--cgroupns', 'private', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE), + ) + elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2: + # Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version. + # We must put the container PID 1 into the cgroup v1 systemd hierarchy we create. + cgroup_path = self.create_systemd_cgroup_v1() # podman + command = f'echo 1 > {cgroup_path}/cgroup.procs' + + options.extend(( + # Force Podman to enable systemd support since a command is being provided. + '--systemd', 'always', + # A private cgroup namespace is required. Using the host cgroup namespace results in errors such as the following (from crun): + # Error: OCI runtime error: mount `/sys/fs/cgroup` to '/sys/fs/cgroup': Invalid argument + # A similar error occurs when using runc: + # Error: OCI runtime error: runc create failed: unable to start container process: error during container init: + # error mounting "/sys/fs/cgroup" to rootfs at "/sys/fs/cgroup": mount /sys/fs/cgroup:/sys/fs/cgroup (via /proc/self/fd/7), flags: 0x1000: + # invalid argument + '--cgroupns', 'private', + # Unlike Docker, Podman ignores a /sys/fs/cgroup tmpfs mount, instead exposing a cgroup v2 mount. + # The exposed volume will be read-write, but the container will have its own private namespace. + # Provide a read-only cgroup v1 systemd hierarchy under which the dedicated ansible-test cgroup will be mounted read-write. + # Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy. + # Podman doesn't support using a tmpfs for this. 
Attempting to do so results in an error (from crun): + # Error: OCI runtime error: read: Invalid argument + # A similar error occurs when using runc: + # Error: OCI runtime error: runc create failed: unable to start container process: error during container init: + # error mounting "tmpfs" to rootfs at "/sys/fs/cgroup/systemd": tmpcopyup: failed to copy /sys/fs/cgroup/systemd to /proc/self/fd/7 + # (/tmp/runctop3876247619/runctmpdir1460907418): read /proc/self/fd/7/cgroup.kill: invalid argument + '--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:ro', + # Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test. + '--volume', f'{cgroup_path}:{cgroup_path}:rw', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE), + CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=False, state=CGroupState.SHADOWED), + CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST), + ) + else: + raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.') + + return self.InitConfig( + options=options, + command=command, + command_privileged=command_privileged, + expected_mounts=expected_mounts, + ) + + def get_docker_init_config(self) -> InitConfig: + """Return init config for running under Docker.""" + options = self.get_common_run_options() + command: t.Optional[str] = None + command_privileged = False + expected_mounts: tuple[CGroupMount, ...] + + cgroup_version = get_docker_info(self.args).cgroup_version + + if self.config.cgroup == CGroupVersion.NONE: + # Containers which do not require cgroup do not use systemd. + + if get_docker_info(self.args).cgroupns_option_supported: + # Use the `--cgroupns` option if it is supported. + # Older servers which do not support the option use the host group namespace. 
+ # Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only). + # See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517 + # If the host cgroup namespace is used, cgroup information will be visible, but the cgroup mounts will be unavailable due to the tmpfs below. + options.extend(( + # A private cgroup namespace limits what is visible in /proc/*/cgroup. + '--cgroupns', 'private', + )) + + options.extend(( + # Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Docker. + # This helps provide a consistent container environment across various container host configurations. + '--tmpfs', '/sys/fs/cgroup', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + ) + elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1: + # Docker hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-only in the container. + # They will also create a dedicated cgroup v1 systemd hierarchy for the container. + # The cgroup v1 system hierarchy path is: /sys/fs/cgroup/systemd/{container_id}/ + + if get_docker_info(self.args).cgroupns_option_supported: + # Use the `--cgroupns` option if it is supported. + # Older servers which do not support the option use the host group namespace. + # Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only). + # See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517 + options.extend(( + # The host cgroup namespace must be used. + # Otherwise, /proc/1/cgroup will report "/" for the cgroup path, which is incorrect. + # See: https://github.com/systemd/systemd/issues/19245#issuecomment-815954506 + # It is set here to avoid relying on the current Docker configuration. 
+ '--cgroupns', 'host', + )) + + options.extend(( + # Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container. + '--tmpfs', '/sys/fs/cgroup', + # A cgroup v1 systemd hierarchy needs to be mounted read-write over the read-only one provided by Docker. + # Alternatives were tested, but were unusable due to various issues: + # - Attempting to remount the existing mount point read-write will result in a "mount point is busy" error. + # - Adding the entire "/sys/fs/cgroup" mount will expose hierarchies other than systemd. + # If the host is a cgroup v2 hybrid host it would also expose the /sys/fs/cgroup/unified/ hierarchy read-write. + # On older systems, such as an Ubuntu 18.04 host, a dedicated v2 cgroup would not be used, exposing the host cgroups to the container. + '--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', + )) + + self.check_systemd_cgroup_v1(options) # docker + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST), + ) + elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2: + # Docker hosts providing cgroup v2 will give each container a read-only cgroup mount. + # It must be remounted read-write before systemd starts. + # This must be done in a privileged container, otherwise a "permission denied" error can occur. + command = 'mount -o remount,rw /sys/fs/cgroup/' + command_privileged = True + + options.extend(( + # A private cgroup namespace is used to avoid exposing the host cgroup to the container. + # This matches the behavior in Podman 1.7.0 and later, which select cgroupns 'host' mode for cgroup v1 and 'private' mode for cgroup v2. 
+ # See: https://github.com/containers/podman/pull/4374 + # See: https://github.com/containers/podman/blob/main/RELEASE_NOTES.md#170 + '--cgroupns', 'private', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE), + ) + elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2: + # Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version. + # We must put the container PID 1 into the cgroup v1 systemd hierarchy we create. + cgroup_path = self.create_systemd_cgroup_v1() # docker + command = f'echo 1 > {cgroup_path}/cgroup.procs' + + options.extend(( + # A private cgroup namespace is used since no access to the host cgroup namespace is required. + # This matches the configuration used for running cgroup v1 containers under Podman. + '--cgroupns', 'private', + # Provide a read-write tmpfs filesystem to support additional cgroup mount points. + # Without this Docker will provide a read-only cgroup2 mount instead. + '--tmpfs', '/sys/fs/cgroup', + # Provide a read-write tmpfs filesystem to simulate a systemd cgroup v1 hierarchy. + # Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy. + '--tmpfs', '/sys/fs/cgroup/systemd', + # Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test. 
+ '--volume', f'{cgroup_path}:{cgroup_path}:rw', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.TMPFS, writable=True, state=None), + CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST), + ) + else: + raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.') + + return self.InitConfig( + options=options, + command=command, + command_privileged=command_privileged, + expected_mounts=expected_mounts, + ) + + def build_init_command(self, init_config: InitConfig, sleep: bool) -> t.Optional[list[str]]: + """ + Build and return the command to start in the container. + Returns None if the default command for the container should be used. + + The sleep duration below was selected to: + + - Allow enough time to perform necessary operations in the container before waking it. + - Make the delay obvious if the wake command doesn't run or succeed. + - Avoid hanging indefinitely or for an unreasonably long time. + + NOTE: The container must have a POSIX-compliant default shell "sh" with a non-builtin "sleep" command. + """ + command = '' + + if init_config.command and not init_config.command_privileged: + command += f'{init_config.command} && ' + + if sleep or init_config.command_privileged: + command += 'sleep 60 ; ' + + if not command: + return None + + docker_pull(self.args, self.config.image) + inspect = docker_image_inspect(self.args, self.config.image) + + command += f'exec {shlex.join(inspect.cmd)}' + + return ['sh', '-c', command] + + @property + def wake_command(self) -> list[str]: + """ + The command used to wake the container from sleep. + This will be run inside our utility container, so the command used does not need to be present in the container being woken up. 
+ """ + return ['pkill', 'sleep'] + + def check_systemd_cgroup_v1(self, options: list[str]) -> None: + """Check the cgroup v1 systemd hierarchy to verify it is writeable for our container.""" + probe_script = (read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'check_systemd_cgroup_v1.sh')) + .replace('@MARKER@', self.MARKER) + .replace('@LABEL@', self.label)) + + cmd = ['sh'] + + try: + run_utility_container(self.args, f'ansible-test-cgroup-check-{self.label}', cmd, options, data=probe_script) + except SubprocessError as ex: + if error := self.extract_error(ex.stderr): + raise ControlGroupError(self.args, 'Unable to create a v1 cgroup within the systemd hierarchy.\n' + f'Reason: {error}') from ex # cgroup probe failed + + raise + + def create_systemd_cgroup_v1(self) -> str: + """Create a unique ansible-test cgroup in the v1 systemd hierarchy and return its path.""" + self.cgroup_path = f'/sys/fs/cgroup/systemd/ansible-test-{self.label}' + + # Privileged mode is required to create the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0. + # The mkdir command will fail with "Permission denied" otherwise. 
+ options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged'] + cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && mkdir {shlex.quote(self.cgroup_path)}'] + + try: + run_utility_container(self.args, f'ansible-test-cgroup-create-{self.label}', cmd, options) + except SubprocessError as ex: + if error := self.extract_error(ex.stderr): + raise ControlGroupError(self.args, f'Unable to create a v1 cgroup within the systemd hierarchy.\n' + f'Reason: {error}') from ex # cgroup create permission denied + + raise + + return self.cgroup_path + + @property + def delete_systemd_cgroup_v1_command(self) -> list[str]: + """The command used to remove the previously created ansible-test cgroup in the v1 systemd hierarchy.""" + return ['find', self.cgroup_path, '-type', 'd', '-delete'] + + def delete_systemd_cgroup_v1(self) -> None: + """Delete a previously created ansible-test cgroup in the v1 systemd hierarchy.""" + # Privileged mode is required to remove the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0. + # The BusyBox find utility will report "Permission denied" otherwise, although it still exits with a status code of 0. + options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged'] + cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && {shlex.join(self.delete_systemd_cgroup_v1_command)}'] + + try: + run_utility_container(self.args, f'ansible-test-cgroup-delete-{self.label}', cmd, options) + except SubprocessError as ex: + if error := self.extract_error(ex.stderr): + if error.endswith(': No such file or directory'): + return + + display.error(str(ex)) + + def extract_error(self, value: str) -> t.Optional[str]: + """ + Extract the ansible-test portion of the error message from the given value and return it. + Returns None if no ansible-test marker was found. 
+ """ + lines = value.strip().splitlines() + + try: + idx = lines.index(self.MARKER) + except ValueError: + return None + + lines = lines[idx + 1:] + message = '\n'.join(lines) + + return message + + def check_cgroup_requirements(self): + """Check cgroup requirements for the container.""" + cgroup_version = get_docker_info(self.args).cgroup_version + + if cgroup_version not in (1, 2): + raise ApplicationError(f'The container host provides cgroup v{cgroup_version}, but only version v1 and v2 are supported.') + + # Stop early for containers which require cgroup v2 when the container host does not provide it. + # None of the containers included with ansible-test currently use this configuration. + # Support for v2-only was added in preparation for the eventual removal of cgroup v1 support from systemd after EOY 2023. + # See: https://github.com/systemd/systemd/pull/24086 + if self.config.cgroup == CGroupVersion.V2_ONLY and cgroup_version != 2: + raise ApplicationError(f'Container {self.config.name} requires cgroup v2 but the container host provides cgroup v{cgroup_version}.') + + # Containers which use old versions of systemd (earlier than version 226) require cgroup v1 support. + # If the host is a cgroup v2 (unified) host, changes must be made to how the container is run. + # + # See: https://github.com/systemd/systemd/blob/main/NEWS + # Under the "CHANGES WITH 226" section: + # > systemd now optionally supports the new Linux kernel "unified" control group hierarchy. + # + # NOTE: The container host must have the cgroup v1 mount already present. + # If the container is run rootless, the user it runs under must have permissions to the mount. 
+ # + # The following commands can be used to make the mount available: + # + # mkdir /sys/fs/cgroup/systemd + # mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr + # chown -R {user}:{group} /sys/fs/cgroup/systemd # only when rootless + # + # See: https://github.com/containers/crun/blob/main/crun.1.md#runocisystemdforce_cgroup_v1path + if self.config.cgroup == CGroupVersion.V1_ONLY or (self.config.cgroup != CGroupVersion.NONE and get_docker_info(self.args).cgroup_version == 1): + if (cgroup_v1 := detect_host_properties(self.args).cgroup_v1) != SystemdControlGroupV1Status.VALID: + if self.config.cgroup == CGroupVersion.V1_ONLY: + if get_docker_info(self.args).cgroup_version == 2: + reason = f'Container {self.config.name} requires cgroup v1, but the container host only provides cgroup v2.' + else: + reason = f'Container {self.config.name} requires cgroup v1, but the container host does not appear to be running systemd.' + else: + reason = 'The container host provides cgroup v1, but does not appear to be running systemd.' 
+ + reason += f'\n{cgroup_v1.value}' + + raise ControlGroupError(self.args, reason) # cgroup probe reported invalid state + def setup(self): # type: () -> None """Perform out-of-band setup before delegation.""" bootstrapper = BootstrapDocker( @@ -361,32 +948,62 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do setup_sh = bootstrapper.get_script() shell = setup_sh.splitlines()[0][2:] - docker_exec(self.args, self.container_name, [shell], data=setup_sh) + try: + docker_exec(self.args, self.container_name, [shell], data=setup_sh, capture=False) + except SubprocessError: + display.info(f'Checking container "{self.container_name}" logs...') + docker_logs(self.args, self.container_name) + raise def deprovision(self): # type: () -> None """Deprovision the host after delegation has completed.""" - if not self.container_name: - return # provision was never called or did not succeed, so there is no container to remove - - if self.args.docker_terminate == TerminateMode.ALWAYS or (self.args.docker_terminate == TerminateMode.SUCCESS and self.args.success): - docker_rm(self.args, self.container_name) + container_exists = False + + if self.container_name: + if self.args.docker_terminate == TerminateMode.ALWAYS or (self.args.docker_terminate == TerminateMode.SUCCESS and self.args.success): + docker_rm(self.args, self.container_name) + else: + container_exists = True + + if self.cgroup_path: + if container_exists: + display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing. ' + f'Then run `{shlex.join(self.delete_systemd_cgroup_v1_command)}` on the container host.') + else: + self.delete_systemd_cgroup_v1() + elif container_exists: + display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing.') def wait(self): # type: () -> None """Wait for the instance to be ready. 
Executed before delegation for the controller and after delegation for targets.""" if not self.controller: con = self.get_controller_target_connections()[0] + last_error = '' - for dummy in range(1, 60): + for dummy in range(1, 10): try: con.run(['id'], capture=True) except SubprocessError as ex: if 'Permission denied' in ex.message: raise + last_error = str(ex) time.sleep(1) else: return + display.info('Checking SSH debug output...') + display.info(last_error) + + if not self.args.delegate and not self.args.host_path: + def callback() -> None: + """Callback to run during error display.""" + self.on_target_failure() # when the controller is not delegated, report failures immediately + else: + callback = None + + raise HostConnectionError(f'Timeout waiting for {self.config.name} container {self.container_name}.', callback) + def get_controller_target_connections(self): # type: () -> t.List[SshConnection] """Return SSH connection(s) for accessing the host as a target from the controller.""" containers = get_container_database(self.args) @@ -402,6 +1019,10 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do port=port, identity_file=SshKey(self.args).key, python_interpreter=self.python.path, + # CentOS 6 uses OpenSSH 5.3, making it incompatible with the default configuration of OpenSSH 8.8 and later clients. + # Since only CentOS 6 is affected, and it is only supported by ansible-core 2.12, support for RSA SHA-1 is simply hard-coded here. + # A substring is used to allow custom containers to work, not just the one provided with ansible-test. 
+ enable_rsa_sha1='centos6' in self.config.image, ) return [SshConnection(self.args, settings)] @@ -414,13 +1035,46 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do """Return the working directory for the host.""" return '/root' - def get_docker_run_options(self): # type: () -> t.List[str] + def on_target_failure(self) -> None: + """Executed during failure handling if this profile is a target.""" + display.info(f'Checking container "{self.container_name}" logs...') + + try: + docker_logs(self.args, self.container_name) + except SubprocessError as ex: + display.error(str(ex)) + + if self.config.cgroup != CGroupVersion.NONE: + # Containers with cgroup support are assumed to be running systemd. + display.info(f'Checking container "{self.container_name}" systemd logs...') + + try: + docker_exec(self.args, self.container_name, ['journalctl'], capture=False) + except SubprocessError as ex: + display.error(str(ex)) + + display.error(f'Connection to container "{self.container_name}" failed. See logs and original error above.') + + def get_common_run_options(self) -> list[str]: """Return a list of options needed to run the container.""" options = [ - '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro', - f'--privileged={str(self.config.privileged).lower()}', + # These temporary mount points need to be created at run time when using Docker. + # They are automatically provided by Podman, but will be overridden by VOLUME instructions for the container, if they exist. + # If supporting containers with VOLUME instructions is not desired, these options could be limited to use with Docker. + # See: https://github.com/containers/podman/pull/1318 + # Previously they were handled by the VOLUME instruction during container image creation. + # However, that approach creates anonymous volumes when running the container, which are then left behind after the container is deleted. 
+ # These options eliminate the need for the VOLUME instruction, and override it if they are present. + # The mount options used are those typically found on Linux systems. + # Of special note is the "exec" option for "/tmp", which is required by ansible-test for path injection of executables using temporary directories. + '--tmpfs', '/tmp:exec', + '--tmpfs', '/run:exec', + '--tmpfs', '/run/lock', # some systemd containers require a separate tmpfs here, such as Ubuntu 20.04 and Ubuntu 22.04 ] + if self.config.privileged: + options.append('--privileged') + if self.config.memory: options.extend([ f'--memory={self.config.memory}', @@ -448,7 +1102,7 @@ class NetworkRemoteProfile(RemoteProfile[NetworkRemoteConfig]): """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets.""" self.wait_until_ready() - def get_inventory_variables(self): + def get_inventory_variables(self): # type: () -> t.Dict[str, t.Optional[t.Union[str, int]]] """Return inventory variables for accessing this host.""" core_ci = self.wait_for_instance() connection = core_ci.connection @@ -460,8 +1114,15 @@ class NetworkRemoteProfile(RemoteProfile[NetworkRemoteConfig]): ansible_port=connection.port, ansible_user=connection.username, ansible_ssh_private_key_file=core_ci.ssh_key.key, + # VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later. + # IOS CSR 1000V uses an ancient SSH server, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later. + # That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here. + # NOTE: This option only exists in ansible-core 2.14 and later. For older ansible-core versions, use of Paramiko 2.8.x or earlier is required. 
+ # See: https://github.com/ansible/ansible/pull/78789 + # See: https://github.com/ansible/ansible/pull/78842 + ansible_paramiko_use_rsa_sha2_algorithms='no', ansible_network_os=f'{self.config.collection}.{self.config.platform}' if self.config.collection else self.config.platform, - ) + ) # type: t.Dict[str, t.Optional[t.Union[str, int]]] return variables @@ -483,13 +1144,14 @@ class NetworkRemoteProfile(RemoteProfile[NetworkRemoteConfig]): for dummy in range(1, 90): try: - intercept_python(self.args, self.args.controller_python, cmd, env) - except SubprocessError: + intercept_python(self.args, self.args.controller_python, cmd, env, capture=True) + except SubprocessError as ex: + display.warning(str(ex)) time.sleep(10) else: return - raise ApplicationError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') + raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') def get_controller_target_connections(self): # type: () -> t.List[SshConnection] """Return SSH connection(s) for accessing the host as a target from the controller.""" @@ -501,6 +1163,10 @@ class NetworkRemoteProfile(RemoteProfile[NetworkRemoteConfig]): port=core_ci.connection.port, user=core_ci.connection.username, identity_file=core_ci.ssh_key.key, + # VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with the default configuration of OpenSSH 8.8 and later clients. + # IOS CSR 1000V uses an ancient SSH server, making it incompatible with the default configuration of OpenSSH 8.8 and later clients. + # That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here. 
+ enable_rsa_sha1=True, ) return [SshConnection(self.args, settings)] @@ -546,7 +1212,7 @@ class PosixRemoteProfile(ControllerHostProfile[PosixRemoteConfig], RemoteProfile shell = setup_sh.splitlines()[0][2:] ssh = self.get_origin_controller_connection() - ssh.run([shell], data=setup_sh) + ssh.run([shell], data=setup_sh, capture=False) def get_ssh_connection(self): # type: () -> SshConnection """Return an SSH connection for accessing the host.""" @@ -562,15 +1228,12 @@ class PosixRemoteProfile(ControllerHostProfile[PosixRemoteConfig], RemoteProfile ) if settings.user == 'root': - become = None - elif self.config.platform == 'freebsd': - become = Su() - elif self.config.platform == 'macos': - become = Sudo() - elif self.config.platform == 'rhel': - become = Sudo() + become = None # type: t.Optional[Become] + elif self.config.become: + become = SUPPORTED_BECOME_METHODS[self.config.become]() else: - raise NotImplementedError(f'Become support has not been implemented for platform "{self.config.platform}" and user "{settings.user}" is not root.') + display.warning(f'Defaulting to "sudo" for platform "{self.config.platform}" become support.', unique=True) + become = Sudo() return SshConnection(self.args, settings, become) @@ -582,12 +1245,12 @@ class PosixRemoteProfile(ControllerHostProfile[PosixRemoteConfig], RemoteProfile try: return self.get_working_directory() except SubprocessError as ex: - if 'Permission denied' in ex.message: - raise - + # No "Permission denied" check is performed here. + # Unlike containers, with remote instances, user configuration isn't guaranteed to have been completed before SSH connections are attempted. 
+ display.warning(str(ex)) time.sleep(10) - raise ApplicationError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') + raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') def get_controller_target_connections(self): # type: () -> t.List[SshConnection] """Return SSH connection(s) for accessing the host as a target from the controller.""" @@ -672,7 +1335,7 @@ class WindowsRemoteProfile(RemoteProfile[WindowsRemoteConfig]): """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets.""" self.wait_until_ready() - def get_inventory_variables(self): + def get_inventory_variables(self): # type: () -> t.Dict[str, t.Optional[t.Union[str, int]]] """Return inventory variables for accessing this host.""" core_ci = self.wait_for_instance() connection = core_ci.connection @@ -686,7 +1349,7 @@ class WindowsRemoteProfile(RemoteProfile[WindowsRemoteConfig]): ansible_user=connection.username, ansible_password=connection.password, ansible_ssh_private_key_file=core_ci.ssh_key.key, - ) + ) # type: t.Dict[str, t.Optional[t.Union[str, int]]] # HACK: force 2016 to use NTLM + HTTP message encryption if self.config.version == '2016': @@ -716,13 +1379,14 @@ class WindowsRemoteProfile(RemoteProfile[WindowsRemoteConfig]): for dummy in range(1, 120): try: - intercept_python(self.args, self.args.controller_python, cmd, env) - except SubprocessError: + intercept_python(self.args, self.args.controller_python, cmd, env, capture=True) + except SubprocessError as ex: + display.warning(str(ex)) time.sleep(10) else: return - raise ApplicationError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') + raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') def get_controller_target_connections(self): # type: () -> t.List[SshConnection] """Return SSH connection(s) for accessing the host as a target from the 
controller.""" diff --git a/test/lib/ansible_test/_internal/inventory.py b/test/lib/ansible_test/_internal/inventory.py index 73a9ae9c38..7e930040c0 100644 --- a/test/lib/ansible_test/_internal/inventory.py +++ b/test/lib/ansible_test/_internal/inventory.py @@ -25,6 +25,10 @@ from .host_profiles import ( WindowsRemoteProfile, ) +from .ssh import ( + ssh_options_to_str, +) + def create_controller_inventory(args, path, controller_host): # type: (EnvironmentConfig, str, ControllerHostProfile) -> None """Create and return inventory for use in controller-only integration tests.""" @@ -94,7 +98,7 @@ def create_network_inventory(args, path, target_hosts): # type: (EnvironmentCon return target_hosts = t.cast(t.List[NetworkRemoteProfile], target_hosts) - host_groups = {target_host.config.platform: {} for target_host in target_hosts} + host_groups = {target_host.config.platform: {} for target_host in target_hosts} # type: t.Dict[str, t.Dict[str, t.Dict[str, t.Union[str, int]]]] for target_host in target_hosts: host_groups[target_host.config.platform][sanitize_host_name(target_host.config.name)] = target_host.get_inventory_variables() @@ -149,7 +153,8 @@ def create_posix_inventory(args, path, target_hosts, needs_ssh=False): # type: ansible_port=ssh.settings.port, ansible_user=ssh.settings.user, ansible_ssh_private_key_file=ssh.settings.identity_file, - ) + ansible_ssh_extra_args=ssh_options_to_str(ssh.settings.options), + ) # type: t.Dict[str, t.Optional[t.Union[str, int]]] if ssh.become: testhost.update( diff --git a/test/lib/ansible_test/_internal/io.py b/test/lib/ansible_test/_internal/io.py index 9d3301a147..df8c98d498 100644 --- a/test/lib/ansible_test/_internal/io.py +++ b/test/lib/ansible_test/_internal/io.py @@ -14,17 +14,17 @@ from .encoding import ( ) -def read_json_file(path): # type: (t.AnyStr) -> t.Any +def read_json_file(path): # type: (str) -> t.Any """Parse and return the json content from the specified path.""" return json.loads(read_text_file(path)) -def 
read_text_file(path): # type: (t.AnyStr) -> t.Text +def read_text_file(path): # type: (str) -> t.Text """Return the contents of the specified path as text.""" return to_text(read_binary_file(path)) -def read_binary_file(path): # type: (t.AnyStr) -> bytes +def read_binary_file(path): # type: (str) -> bytes """Return the contents of the specified path as bytes.""" with open_binary_file(path) as file_obj: return file_obj.read() @@ -43,7 +43,7 @@ def write_json_file(path, # type: str content, # type: t.Any create_directories=False, # type: bool formatted=True, # type: bool - encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]] + encoder=None, # type: t.Optional[t.Type[json.JSONEncoder]] ): # type: (...) -> str """Write the given json content to the specified path, optionally creating missing directories.""" text_content = json.dumps(content, @@ -67,21 +67,19 @@ def write_text_file(path, content, create_directories=False): # type: (str, str file_obj.write(to_bytes(content)) -def open_text_file(path, mode='r'): # type: (str, str) -> t.TextIO +def open_text_file(path, mode='r'): # type: (str, str) -> t.IO[str] """Open the given path for text access.""" if 'b' in mode: raise Exception('mode cannot include "b" for text files: %s' % mode) - # noinspection PyTypeChecker return io.open(to_bytes(path), mode, encoding=ENCODING) # pylint: disable=consider-using-with -def open_binary_file(path, mode='rb'): # type: (str, str) -> t.BinaryIO +def open_binary_file(path, mode='rb'): # type: (str, str) -> t.IO[bytes] """Open the given path for binary access.""" if 'b' not in mode: raise Exception('mode must include "b" for binary files: %s' % mode) - # noinspection PyTypeChecker return io.open(to_bytes(path), mode) # pylint: disable=consider-using-with diff --git a/test/lib/ansible_test/_internal/metadata.py b/test/lib/ansible_test/_internal/metadata.py index 769ec8348b..e7f82b0aac 100644 --- a/test/lib/ansible_test/_internal/metadata.py +++ 
b/test/lib/ansible_test/_internal/metadata.py @@ -21,8 +21,8 @@ class Metadata: """Metadata object for passing data to delegated tests.""" def __init__(self): """Initialize metadata.""" - self.changes = {} # type: t.Dict[str, t.Tuple[t.Tuple[int, int]]] - self.cloud_config = None # type: t.Optional[t.Dict[str, str]] + self.changes = {} # type: t.Dict[str, t.Tuple[t.Tuple[int, int], ...]] + self.cloud_config = None # type: t.Optional[t.Dict[str, t.Dict[str, t.Union[int, str, bool]]]] self.change_description = None # type: t.Optional[ChangeDescription] self.ci_provider = None # type: t.Optional[str] diff --git a/test/lib/ansible_test/_internal/payload.py b/test/lib/ansible_test/_internal/payload.py index d92f9f6589..e6ccc6ed5f 100644 --- a/test/lib/ansible_test/_internal/payload.py +++ b/test/lib/ansible_test/_internal/payload.py @@ -34,8 +34,8 @@ from .util_common import ( ) # improve performance by disabling uid/gid lookups -tarfile.pwd = None -tarfile.grp = None +tarfile.pwd = None # type: ignore[attr-defined] # undocumented attribute +tarfile.grp = None # type: ignore[attr-defined] # undocumented attribute def create_payload(args, dst_path): # type: (CommonConfig, str) -> None @@ -69,8 +69,8 @@ def create_payload(args, dst_path): # type: (CommonConfig, str) -> None collection_layouts = data_context().create_collection_layouts() - content_files = [] - extra_files = [] + content_files = [] # type: t.List[t.Tuple[str, str]] + extra_files = [] # type: t.List[t.Tuple[str, str]] for layout in collection_layouts: if layout == data_context().content: diff --git a/test/lib/ansible_test/_internal/provider/__init__.py b/test/lib/ansible_test/_internal/provider/__init__.py index e8972ac87c..7834614265 100644 --- a/test/lib/ansible_test/_internal/provider/__init__.py +++ b/test/lib/ansible_test/_internal/provider/__init__.py @@ -16,7 +16,7 @@ def get_path_provider_classes(provider_type): # type: (t.Type[TPathProvider]) - return sorted(get_subclasses(provider_type), key=lambda 
c: (c.priority, c.__name__)) -def find_path_provider(provider_type, # type: t.Type[TPathProvider], +def find_path_provider(provider_type, # type: t.Type[TPathProvider] provider_classes, # type: t.List[t.Type[TPathProvider]] path, # type: str walk, # type: bool diff --git a/test/lib/ansible_test/_internal/provider/layout/__init__.py b/test/lib/ansible_test/_internal/provider/layout/__init__.py index 147fcbd56f..9fd13550e5 100644 --- a/test/lib/ansible_test/_internal/provider/layout/__init__.py +++ b/test/lib/ansible_test/_internal/provider/layout/__init__.py @@ -91,6 +91,7 @@ class ContentLayout(Layout): unit_module_path, # type: str unit_module_utils_path, # type: str unit_messages, # type: t.Optional[LayoutMessages] + unsupported=False, # type: bool ): # type: (...) -> None super().__init__(root, paths) @@ -108,6 +109,7 @@ class ContentLayout(Layout): self.unit_module_path = unit_module_path self.unit_module_utils_path = unit_module_utils_path self.unit_messages = unit_messages + self.unsupported = unsupported self.is_ansible = root == ANSIBLE_SOURCE_ROOT @@ -204,7 +206,7 @@ class LayoutProvider(PathProvider): def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple[t.Dict[str, t.Any], t.List[str]] """Return a filesystem tree from the given list of paths.""" - tree = {}, [] + tree = {}, [] # type: t.Tuple[t.Dict[str, t.Any], t.List[str]] for path in paths: parts = path.split(os.path.sep) diff --git a/test/lib/ansible_test/_internal/provider/layout/collection.py b/test/lib/ansible_test/_internal/provider/layout/collection.py index 5dca046f02..6b826ee4a3 100644 --- a/test/lib/ansible_test/_internal/provider/layout/collection.py +++ b/test/lib/ansible_test/_internal/provider/layout/collection.py @@ -11,6 +11,10 @@ from . 
import ( LayoutMessages, ) +from ...util import ( + is_valid_identifier, +) + class CollectionLayout(LayoutProvider): """Layout provider for Ansible collections.""" @@ -28,6 +32,10 @@ class CollectionLayout(LayoutProvider): collection_root = os.path.dirname(os.path.dirname(root)) collection_dir = os.path.relpath(root, collection_root) + + collection_namespace: str + collection_name: str + collection_namespace, collection_name = collection_dir.split(os.path.sep) collection_root = os.path.dirname(collection_root) @@ -65,6 +73,7 @@ class CollectionLayout(LayoutProvider): unit_module_path='tests/unit/plugins/modules', unit_module_utils_path='tests/unit/plugins/module_utils', unit_messages=unit_messages, + unsupported=not(is_valid_identifier(collection_namespace) and is_valid_identifier(collection_name)), ) @staticmethod diff --git a/test/lib/ansible_test/_internal/provider/layout/unsupported.py b/test/lib/ansible_test/_internal/provider/layout/unsupported.py new file mode 100644 index 0000000000..80a9129198 --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/layout/unsupported.py @@ -0,0 +1,42 @@ +"""Layout provider for an unsupported directory layout.""" +from __future__ import annotations + +import typing as t + +from . 
import ( + ContentLayout, + LayoutProvider, +) + + +class UnsupportedLayout(LayoutProvider): + """Layout provider for an unsupported directory layout.""" + sequence = 0 # disable automatic detection + + @staticmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" + return False + + def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout + """Create a Layout using the given root and paths.""" + plugin_paths = dict((p, p) for p in self.PLUGIN_TYPES) + + return ContentLayout(root, + paths, + plugin_paths=plugin_paths, + collection=None, + test_path='', + results_path='', + sanity_path='', + sanity_messages=None, + integration_path='', + integration_targets_path='', + integration_vars_path='', + integration_messages=None, + unit_path='', + unit_module_path='', + unit_module_utils_path='', + unit_messages=None, + unsupported=True, + ) diff --git a/test/lib/ansible_test/_internal/provider/source/unsupported.py b/test/lib/ansible_test/_internal/provider/source/unsupported.py new file mode 100644 index 0000000000..ff5562c62c --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/source/unsupported.py @@ -0,0 +1,22 @@ +"""Source provider to use when the layout is unsupported.""" +from __future__ import annotations + +import typing as t + +from . 
import ( + SourceProvider, +) + + +class UnsupportedSource(SourceProvider): + """Source provider to use when the layout is unsupported.""" + sequence = 0 # disable automatic detection + + @staticmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" + return False + + def get_paths(self, path): # type: (str) -> t.List[str] + """Return the list of available content paths under the given path.""" + return [] diff --git a/test/lib/ansible_test/_internal/provisioning.py b/test/lib/ansible_test/_internal/provisioning.py index a95360360b..5a5361ed60 100644 --- a/test/lib/ansible_test/_internal/provisioning.py +++ b/test/lib/ansible_test/_internal/provisioning.py @@ -18,10 +18,12 @@ from .config import ( from .util import ( ApplicationError, + HostConnectionError, display, open_binary_file, verify_sys_executable, version_to_str, + type_guard, ) from .thread import ( @@ -88,17 +90,16 @@ class HostState: if not self.target_profiles: raise Exception('No target profiles found.') - if not all(isinstance(target, profile_type) for target in self.target_profiles): - raise Exception(f'Target profile(s) are not of the required type: {profile_type}') + assert type_guard(self.target_profiles, profile_type) - return self.target_profiles + return t.cast(t.List[THostProfile], self.target_profiles) def prepare_profiles( args, # type: TEnvironmentConfig targets_use_pypi=False, # type: bool skip_setup=False, # type: bool - requirements=None, # type: t.Optional[t.Callable[[TEnvironmentConfig, HostState], None]] + requirements=None, # type: t.Optional[t.Callable[[HostProfile], None]] ): # type: (...) -> HostState """ Create new profiles, or load existing ones, and return them. 
@@ -138,7 +139,7 @@ def prepare_profiles( check_controller_python(args, host_state) if requirements: - requirements(args, host_state) + requirements(host_state.controller_profile) def configure(profile): # type: (HostProfile) -> None """Configure the given profile.""" @@ -147,6 +148,9 @@ def prepare_profiles( if not skip_setup: profile.configure() + if requirements: + requirements(profile) + dispatch_jobs([(profile, WrappedThread(functools.partial(configure, profile))) for profile in host_state.target_profiles]) return host_state @@ -184,13 +188,26 @@ def dispatch_jobs(jobs): # type: (t.List[t.Tuple[HostProfile, WrappedThread]]) time.sleep(1) failed = False + connection_failures = 0 for profile, thread in jobs: try: thread.wait_for_result() + except HostConnectionError as ex: + display.error(f'Host {profile.config} connection failed:\n{ex}') + failed = True + connection_failures += 1 + except ApplicationError as ex: + display.error(f'Host {profile.config} job failed:\n{ex}') + failed = True except Exception as ex: # pylint: disable=broad-except - display.error(f'Host {profile} job failed: {ex}\n{"".join(traceback.format_tb(ex.__traceback__))}') + name = f'{"" if ex.__class__.__module__ == "builtins" else ex.__class__.__module__ + "."}{ex.__class__.__qualname__}' + display.error(f'Host {profile.config} job failed:\nTraceback (most recent call last):\n' + f'{"".join(traceback.format_tb(ex.__traceback__)).rstrip()}\n{name}: {ex}') failed = True + if connection_failures: + raise HostConnectionError(f'Host job(s) failed, including {connection_failures} connection failure(s). See previous error(s) for details.') + if failed: raise ApplicationError('Host job(s) failed. 
See previous error(s) for details.') diff --git a/test/lib/ansible_test/_internal/pypi_proxy.py b/test/lib/ansible_test/_internal/pypi_proxy.py index 968794fd20..e31db6dcc1 100644 --- a/test/lib/ansible_test/_internal/pypi_proxy.py +++ b/test/lib/ansible_test/_internal/pypi_proxy.py @@ -124,7 +124,8 @@ def configure_target_pypi_proxy(args, profile, pypi_endpoint, pypi_hostname): # force = 'yes' if profile.config.is_managed else 'no' - run_playbook(args, inventory_path, 'pypi_proxy_prepare.yml', dict(pypi_endpoint=pypi_endpoint, pypi_hostname=pypi_hostname, force=force), capture=True) + run_playbook(args, inventory_path, 'pypi_proxy_prepare.yml', capture=True, variables=dict( + pypi_endpoint=pypi_endpoint, pypi_hostname=pypi_hostname, force=force)) atexit.register(cleanup_pypi_proxy) diff --git a/test/lib/ansible_test/_internal/python_requirements.py b/test/lib/ansible_test/_internal/python_requirements.py index aaaf44b8b3..eed177c393 100644 --- a/test/lib/ansible_test/_internal/python_requirements.py +++ b/test/lib/ansible_test/_internal/python_requirements.py @@ -142,9 +142,9 @@ def install_requirements( if ansible: try: - ansible_cache = install_requirements.ansible_cache + ansible_cache = install_requirements.ansible_cache # type: ignore[attr-defined] except AttributeError: - ansible_cache = install_requirements.ansible_cache = {} + ansible_cache = install_requirements.ansible_cache = {} # type: ignore[attr-defined] ansible_installed = ansible_cache.get(python.path) @@ -262,7 +262,7 @@ def run_pip( if not args.explain: try: - connection.run([python.path], data=script) + connection.run([python.path], data=script, capture=False) except SubprocessError: script = prepare_pip_script([PipVersion()]) @@ -492,7 +492,7 @@ def prepare_pip_script(commands): # type: (t.List[PipCommand]) -> str def usable_pip_file(path): # type: (t.Optional[str]) -> bool """Return True if the specified pip file is usable, otherwise False.""" - return path and os.path.exists(path) and 
os.path.getsize(path) + return bool(path) and os.path.exists(path) and bool(os.path.getsize(path)) # Cryptography diff --git a/test/lib/ansible_test/_internal/ssh.py b/test/lib/ansible_test/_internal/ssh.py index 21212dc1aa..b5fcd5a813 100644 --- a/test/lib/ansible_test/_internal/ssh.py +++ b/test/lib/ansible_test/_internal/ssh.py @@ -2,6 +2,7 @@ from __future__ import annotations import dataclasses +import itertools import json import os import random @@ -38,16 +39,46 @@ class SshConnectionDetail: identity_file: str python_interpreter: t.Optional[str] = None shell_type: t.Optional[str] = None + enable_rsa_sha1: bool = False def __post_init__(self): self.name = sanitize_host_name(self.name) + @property + def options(self) -> dict[str, str]: + """OpenSSH config options, which can be passed to the `ssh` CLI with the `-o` argument.""" + options: dict[str, str] = {} + + if self.enable_rsa_sha1: + # Newer OpenSSH clients connecting to older SSH servers must explicitly enable ssh-rsa support. + # OpenSSH 8.8, released on 2021-09-26, deprecated using RSA with the SHA-1 hash algorithm (ssh-rsa). + # OpenSSH 7.2, released on 2016-02-29, added support for using RSA with SHA-256/512 hash algorithms. + # See: https://www.openssh.com/txt/release-8.8 + algorithms = '+ssh-rsa' # append the algorithm to the default list, requires OpenSSH 7.0 or later + + options.update(dict( + # Host key signature algorithms that the client wants to use. + # Available options can be found with `ssh -Q HostKeyAlgorithms` or `ssh -Q key` on older clients. + # This option was updated in OpenSSH 7.0, released on 2015-08-11, to support the "+" prefix. + # See: https://www.openssh.com/txt/release-7.0 + HostKeyAlgorithms=algorithms, + # Signature algorithms that will be used for public key authentication. + # Available options can be found with `ssh -Q PubkeyAcceptedAlgorithms` or `ssh -Q key` on older clients. + # This option was added in OpenSSH 7.0, released on 2015-08-11. 
+ # See: https://www.openssh.com/txt/release-7.0 + # This option is an alias for PubkeyAcceptedAlgorithms, which was added in OpenSSH 8.5. + # See: https://www.openssh.com/txt/release-8.5 + PubkeyAcceptedKeyTypes=algorithms, + )) + + return options + class SshProcess: """Wrapper around an SSH process.""" def __init__(self, process): # type: (t.Optional[subprocess.Popen]) -> None self._process = process - self.pending_forwards = None # type: t.Optional[t.Set[t.Tuple[str, int]]] + self.pending_forwards = None # type: t.Optional[t.List[t.Tuple[str, int]]] self.forwards = {} # type: t.Dict[t.Tuple[str, int], int] @@ -71,7 +102,7 @@ class SshProcess: def collect_port_forwards(self): # type: (SshProcess) -> t.Dict[t.Tuple[str, int], int] """Collect port assignments for dynamic SSH port forwards.""" - errors = [] + errors = [] # type: t.List[str] display.info('Collecting %d SSH port forward(s).' % len(self.pending_forwards), verbosity=2) @@ -107,7 +138,7 @@ class SshProcess: dst = (dst_host, dst_port) else: # explain mode - dst = list(self.pending_forwards)[0] + dst = self.pending_forwards[0] src_port = random.randint(40000, 50000) self.pending_forwards.remove(dst) @@ -141,7 +172,7 @@ def create_ssh_command( if ssh.user: cmd.extend(['-l', ssh.user]) # user to log in as on the remote machine - ssh_options = dict( + ssh_options: dict[str, t.Union[int, str]] = dict( BatchMode='yes', ExitOnForwardFailure='yes', LogLevel='ERROR', @@ -153,9 +184,7 @@ def create_ssh_command( ssh_options.update(options or {}) - for key, value in sorted(ssh_options.items()): - cmd.extend(['-o', '='.join([key, str(value)])]) - + cmd.extend(ssh_options_to_list(ssh_options)) cmd.extend(cli_args or []) cmd.append(ssh.host) @@ -165,6 +194,18 @@ def create_ssh_command( return cmd +def ssh_options_to_list(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> list[str]: + """Format a dictionary of SSH options as a list suitable for passing to the `ssh` command.""" + return 
list(itertools.chain.from_iterable( + ('-o', f'{key}={value}') for key, value in sorted(options.items()) + )) + + +def ssh_options_to_str(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> str: + """Format a dictionary of SSH options as a string suitable for passing as `ansible_ssh_extra_args` in inventory.""" + return shlex.join(ssh_options_to_list(options)) + + def run_ssh_command( args, # type: EnvironmentConfig ssh, # type: SshConnectionDetail @@ -202,7 +243,7 @@ def create_ssh_port_forwards( """ options = dict( LogLevel='INFO', # info level required to get messages on stderr indicating the ports assigned to each forward - ) + ) # type: t.Dict[str, t.Union[str, int]] cli_args = [] @@ -221,7 +262,7 @@ def create_ssh_port_redirects( redirects, # type: t.List[t.Tuple[int, str, int]] ): # type: (...) -> SshProcess """Create SSH port redirections using the provided list of tuples (bind_port, target_host, target_port).""" - options = {} + options = {} # type: t.Dict[str, t.Union[str, int]] cli_args = [] for bind_port, target_host, target_port in redirects: @@ -245,7 +286,7 @@ def generate_ssh_inventory(ssh_connections): # type: (t.List[SshConnectionDetai ansible_pipelining='yes', ansible_python_interpreter=ssh.python_interpreter, ansible_shell_type=ssh.shell_type, - ansible_ssh_extra_args='-o UserKnownHostsFile=/dev/null', # avoid changing the test environment + ansible_ssh_extra_args=ssh_options_to_str(dict(UserKnownHostsFile='/dev/null', **ssh.options)), # avoid changing the test environment ansible_ssh_host_key_checking='no', ))) for ssh in ssh_connections), ), diff --git a/test/lib/ansible_test/_internal/target.py b/test/lib/ansible_test/_internal/target.py index ced111f784..6b29605d92 100644 --- a/test/lib/ansible_test/_internal/target.py +++ b/test/lib/ansible_test/_internal/target.py @@ -155,7 +155,7 @@ def walk_units_targets(): # type: () -> t.Iterable[TestTarget] return walk_test_targets(path=data_context().content.unit_path, 
module_path=data_context().content.unit_module_path, extensions=('.py',), prefix='test_') -def walk_compile_targets(include_symlinks=True): # type: (bool) -> t.Iterable[TestTarget, ...] +def walk_compile_targets(include_symlinks=True): # type: (bool) -> t.Iterable[TestTarget] """Return an iterable of compile targets.""" return walk_test_targets(module_path=data_context().content.module_path, extensions=('.py',), extra_dirs=('bin',), include_symlinks=include_symlinks) @@ -611,6 +611,9 @@ class IntegrationTarget(CompletionTarget): groups += [a for a in static_aliases if a not in modules] groups += ['module/%s' % m for m in self.modules] + if data_context().content.is_ansible and (self.name == 'ansible-test' or self.name.startswith('ansible-test-')): + groups.append('ansible-test') + if not self.modules: groups.append('non_module') @@ -699,6 +702,8 @@ class IntegrationTarget(CompletionTarget): # configuration + self.retry_never = 'retry/never/' in self.aliases + self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/')))) self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/')))) self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/')))) diff --git a/test/lib/ansible_test/_internal/test.py b/test/lib/ansible_test/_internal/test.py index 2ebda60eaf..05ec5b5959 100644 --- a/test/lib/ansible_test/_internal/test.py +++ b/test/lib/ansible_test/_internal/test.py @@ -218,7 +218,7 @@ class TestFailure(TestResult): command, # type: str test, # type: str python_version=None, # type: t.Optional[str] - messages=None, # type: t.Optional[t.List[TestMessage]] + messages=None, # type: t.Optional[t.Sequence[TestMessage]] summary=None, # type: t.Optional[str] ): super().__init__(command, test, python_version) @@ -264,10 +264,10 @@ class TestFailure(TestResult): message = 'The test `%s` failed. See stderr output for details.' 
% command path = '' message = TestMessage(message, path) - print(message) + print(message) # display goes to stderr, this should be on stdout else: for message in self.messages: - print(message) + print(message) # display goes to stderr, this should be on stdout def write_junit(self, args): # type: (TestConfig) -> None """Write results to a junit XML file.""" diff --git a/test/lib/ansible_test/_internal/thread.py b/test/lib/ansible_test/_internal/thread.py index 1b2fbec2b8..601f60e44d 100644 --- a/test/lib/ansible_test/_internal/thread.py +++ b/test/lib/ansible_test/_internal/thread.py @@ -1,6 +1,8 @@ """Python threading tools.""" from __future__ import annotations +import collections.abc as c +import contextlib import functools import sys import threading @@ -8,14 +10,14 @@ import queue import typing as t -TCallable = t.TypeVar('TCallable', bound=t.Callable) +TCallable = t.TypeVar('TCallable', bound=t.Callable[..., t.Any]) class WrappedThread(threading.Thread): """Wrapper around Thread which captures results and exceptions.""" def __init__(self, action): # type: (t.Callable[[], t.Any]) -> None super().__init__() - self._result = queue.Queue() + self._result = queue.Queue() # type: queue.Queue[t.Any] self.action = action self.result = None @@ -25,8 +27,8 @@ class WrappedThread(threading.Thread): Do not override. Do not call directly. Executed by the start() method. """ # We truly want to catch anything that the worker thread might do including call sys.exit. 
- # Therefore we catch *everything* (including old-style class exceptions) - # noinspection PyBroadException, PyPep8 + # Therefore, we catch *everything* (including old-style class exceptions) + # noinspection PyBroadException try: self._result.put((self.action(), None)) # pylint: disable=locally-disabled, bare-except @@ -41,10 +43,7 @@ class WrappedThread(threading.Thread): result, exception = self._result.get() if exception: - if sys.version_info[0] > 2: - raise exception[1].with_traceback(exception[2]) - # noinspection PyRedundantParentheses - exec('raise exception[0], exception[1], exception[2]') # pylint: disable=locally-disabled, exec-used + raise exception[1].with_traceback(exception[2]) self.result = result @@ -61,4 +60,26 @@ def mutex(func): # type: (TCallable) -> TCallable with lock: return func(*args, **kwargs) - return wrapper + return wrapper # type: ignore[return-value] # requires https://www.python.org/dev/peps/pep-0612/ support + + +__named_lock = threading.Lock() +__named_locks: dict[str, threading.Lock] = {} + + +@contextlib.contextmanager +def named_lock(name: str) -> c.Iterator[bool]: + """ + Context manager that provides named locks using threading.Lock instances. + Once named lock instances are created they are not deleted. + Returns True if this is the first instance of the named lock, otherwise False. 
+ """ + with __named_lock: + if lock_instance := __named_locks.get(name): + first = False + else: + first = True + lock_instance = __named_locks[name] = threading.Lock() + + with lock_instance: + yield first diff --git a/test/lib/ansible_test/_internal/util.py b/test/lib/ansible_test/_internal/util.py index fdd921e113..ce710cdcd2 100644 --- a/test/lib/ansible_test/_internal/util.py +++ b/test/lib/ansible_test/_internal/util.py @@ -1,15 +1,20 @@ """Miscellaneous utility functions and classes.""" from __future__ import annotations +import abc import errno +import enum import fcntl +import importlib.util import inspect +import json +import keyword import os +import platform import pkgutil import random import re import shutil -import socket import stat import string import subprocess @@ -22,6 +27,11 @@ import typing as t from struct import unpack, pack from termios import TIOCGWINSZ +try: + from typing_extensions import TypeGuard # TypeGuard was added in Python 3.9 +except ImportError: + TypeGuard = None + from .encoding import ( to_bytes, to_optional_bytes, @@ -35,6 +45,7 @@ from .io import ( from .thread import ( mutex, + WrappedThread, ) from .constants import ( @@ -48,12 +59,6 @@ TValue = t.TypeVar('TValue') PYTHON_PATHS = {} # type: t.Dict[str, str] -try: - # noinspection PyUnresolvedReferences - MAXFD = subprocess.MAXFD -except AttributeError: - MAXFD = -1 - COVERAGE_CONFIG_NAME = 'coveragerc' ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -79,6 +84,7 @@ ANSIBLE_TEST_CONTROLLER_ROOT = os.path.join(ANSIBLE_TEST_UTIL_ROOT, 'controller' ANSIBLE_TEST_TARGET_ROOT = os.path.join(ANSIBLE_TEST_UTIL_ROOT, 'target') ANSIBLE_TEST_TOOLS_ROOT = os.path.join(ANSIBLE_TEST_CONTROLLER_ROOT, 'tools') +ANSIBLE_TEST_TARGET_TOOLS_ROOT = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'tools') # Modes are set to allow all users the same level of access. # This permits files to be used in tests that change users. 
@@ -95,6 +101,41 @@ MODE_DIRECTORY = MODE_READ | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH +class OutputStream(enum.Enum): + """The output stream to use when running a subprocess and redirecting/capturing stdout or stderr.""" + + ORIGINAL = enum.auto() + AUTO = enum.auto() + + def get_buffer(self, original: t.BinaryIO) -> t.BinaryIO: + """Return the correct output buffer to use, taking into account the given original buffer.""" + + if self == OutputStream.ORIGINAL: + return original + + if self == OutputStream.AUTO: + return display.fd.buffer + + raise NotImplementedError(str(self)) + + +class Architecture: + """ + Normalized architecture names. + These are the architectures supported by ansible-test, such as when provisioning remote instances. + """ + X86_64 = 'x86_64' + AARCH64 = 'aarch64' + + +REMOTE_ARCHITECTURES = list(value for key, value in Architecture.__dict__.items() if not key.startswith('__')) + + +def is_valid_identifier(value: str) -> bool: + """Return True if the given value is a valid non-keyword Python identifier, otherwise return False.""" + return value.isidentifier() and not keyword.iskeyword(value) + + def cache(func): # type: (t.Callable[[], TValue]) -> t.Callable[[], TValue] """Enforce exclusive access on a decorated function and cache the result.""" storage = {} # type: t.Dict[None, TValue] @@ -113,6 +154,58 @@ def cache(func): # type: (t.Callable[[], TValue]) -> t.Callable[[], TValue] return wrapper +@mutex +def detect_architecture(python: str) -> t.Optional[str]: + """Detect the architecture of the specified Python and return a normalized version, or None if it cannot be determined.""" + results: t.Dict[str, t.Optional[str]] + + try: + results = detect_architecture.results # type: ignore[attr-defined] + except AttributeError: + results = detect_architecture.results = {} # type: ignore[attr-defined] + + if python in results: + return results[python] + + if python 
== sys.executable or os.path.realpath(python) == os.path.realpath(sys.executable): + uname = platform.uname() + else: + data = raw_command([python, '-c', 'import json, platform; print(json.dumps(platform.uname()));'], capture=True)[0] + uname = json.loads(data) + + translation = { + 'x86_64': Architecture.X86_64, # Linux, macOS + 'amd64': Architecture.X86_64, # FreeBSD + 'aarch64': Architecture.AARCH64, # Linux, FreeBSD + 'arm64': Architecture.AARCH64, # FreeBSD + } + + candidates = [] + + if len(uname) >= 5: + candidates.append(uname[4]) + + if len(uname) >= 6: + candidates.append(uname[5]) + + candidates = sorted(set(candidates)) + architectures = sorted(set(arch for arch in [translation.get(candidate) for candidate in candidates] if arch)) + + architecture: t.Optional[str] = None + + if not architectures: + display.warning(f'Unable to determine architecture for Python interpreter "{python}" from: {candidates}') + elif len(architectures) == 1: + architecture = architectures[0] + display.info(f'Detected architecture {architecture} for Python interpreter: {python}', verbosity=1) + else: + display.warning(f'Conflicting architectures detected ({architectures}) for Python interpreter "{python}" from: {candidates}') + + results[python] = architecture + + return architecture + + def filter_args(args, filters): # type: (t.List[str], t.Dict[str, int]) -> t.List[str] """Return a filtered version of the given command line arguments.""" remaining = 0 @@ -248,18 +341,46 @@ def get_available_python_versions(): # type: () -> t.Dict[str, str] def raw_command( cmd, # type: t.Iterable[str] - capture=False, # type: bool + capture, # type: bool env=None, # type: t.Optional[t.Dict[str, str]] data=None, # type: t.Optional[str] cwd=None, # type: t.Optional[str] explain=False, # type: bool - stdin=None, # type: t.Optional[t.BinaryIO] - stdout=None, # type: t.Optional[t.BinaryIO] + stdin=None, # type: t.Optional[t.Union[t.IO[bytes], int]] + stdout=None, # type: 
t.Optional[t.Union[t.IO[bytes], int]] + interactive=False, # type: bool + output_stream=None, # type: t.Optional[OutputStream] cmd_verbosity=1, # type: int str_errors='strict', # type: str error_callback=None, # type: t.Optional[t.Callable[[SubprocessError], None]] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified command and return stdout and stderr as a tuple.""" + output_stream = output_stream or OutputStream.AUTO + + if capture and interactive: + raise InternalError('Cannot combine capture=True with interactive=True.') + + if data and interactive: + raise InternalError('Cannot combine data with interactive=True.') + + if stdin and interactive: + raise InternalError('Cannot combine stdin with interactive=True.') + + if stdout and interactive: + raise InternalError('Cannot combine stdout with interactive=True.') + + if stdin and data: + raise InternalError('Cannot combine stdin with data.') + + if stdout and not capture: + raise InternalError('Redirection of stdout requires capture=True to avoid redirection of stderr to stdout.') + + if output_stream != OutputStream.AUTO and capture: + raise InternalError(f'Cannot combine {output_stream=} with capture=True.') + + if output_stream != OutputStream.AUTO and interactive: + raise InternalError(f'Cannot combine {output_stream=} with interactive=True.') + if not cwd: cwd = os.getcwd() @@ -270,7 +391,30 @@ def raw_command( escaped_cmd = ' '.join(shlex.quote(c) for c in cmd) - display.info('Run command: %s' % escaped_cmd, verbosity=cmd_verbosity, truncate=True) + if capture: + description = 'Run' + elif interactive: + description = 'Interactive' + else: + description = 'Stream' + + description += ' command' + + with_types = [] + + if data: + with_types.append('data') + + if stdin: + with_types.append('stdin') + + if stdout: + with_types.append('stdout') + + if with_types: + description += f' with {"/".join(with_types)}' + + display.info(f'{description}: {escaped_cmd}', 
verbosity=cmd_verbosity, truncate=True) display.info('Working directory: %s' % cwd, verbosity=2) program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning') @@ -288,17 +432,23 @@ def raw_command( if stdin is not None: data = None - communicate = True elif data is not None: stdin = subprocess.PIPE communicate = True - - if stdout: - communicate = True - - if capture: + elif interactive: + pass # allow the subprocess access to our stdin + else: + stdin = subprocess.DEVNULL + + if not interactive: + # When not running interactively, send subprocess stdout/stderr through a pipe. + # This isolates the stdout/stderr of the subprocess from the current process, and also hides the current TTY from it, if any. + # This prevents subprocesses from sharing stdout/stderr with the current process or each other. + # Doing so allows subprocesses to safely make changes to their file handles, such as making them non-blocking (ssh does this). + # This also maintains consistency between local testing and CI systems, which typically do not provide a TTY. + # To maintain output ordering, a single pipe is used for both stdout/stderr when not capturing output unless the output stream is ORIGINAL. 
stdout = stdout or subprocess.PIPE - stderr = subprocess.PIPE + stderr = subprocess.PIPE if capture or output_stream == OutputStream.ORIGINAL else subprocess.STDOUT communicate = True else: stderr = None @@ -318,7 +468,8 @@ def raw_command( if communicate: data_bytes = to_optional_bytes(data) - stdout_bytes, stderr_bytes = process.communicate(data_bytes) + stdout_bytes, stderr_bytes = communicate_with_process(process, data_bytes, stdout == subprocess.PIPE, stderr == subprocess.PIPE, capture=capture, + output_stream=output_stream) stdout_text = to_optional_text(stdout_bytes, str_errors) or u'' stderr_text = to_optional_text(stderr_bytes, str_errors) or u'' else: @@ -341,6 +492,122 @@ def raw_command( raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime, error_callback) +def communicate_with_process( + process: subprocess.Popen, + stdin: t.Optional[bytes], + stdout: bool, + stderr: bool, + capture: bool, + output_stream: OutputStream, +) -> t.Tuple[bytes, bytes]: + """Communicate with the specified process, handling stdin/stdout/stderr as requested.""" + threads: t.List[WrappedThread] = [] + reader: t.Type[ReaderThread] + + if capture: + reader = CaptureThread + else: + reader = OutputThread + + if stdin is not None: + threads.append(WriterThread(process.stdin, stdin)) + + if stdout: + stdout_reader = reader(process.stdout, output_stream.get_buffer(sys.stdout.buffer)) + threads.append(stdout_reader) + else: + stdout_reader = None + + if stderr: + stderr_reader = reader(process.stderr, output_stream.get_buffer(sys.stderr.buffer)) + threads.append(stderr_reader) + else: + stderr_reader = None + + for thread in threads: + thread.start() + + for thread in threads: + try: + thread.wait_for_result() + except Exception as ex: # pylint: disable=broad-except + display.error(str(ex)) + + if isinstance(stdout_reader, ReaderThread): + stdout_bytes = b''.join(stdout_reader.lines) + else: + stdout_bytes = b'' + + if isinstance(stderr_reader, ReaderThread): + 
stderr_bytes = b''.join(stderr_reader.lines) + else: + stderr_bytes = b'' + + process.wait() + + return stdout_bytes, stderr_bytes + + +class WriterThread(WrappedThread): + """Thread to write data to stdin of a subprocess.""" + def __init__(self, handle: t.IO[bytes], data: bytes) -> None: + super().__init__(self._run) + + self.handle = handle + self.data = data + + def _run(self) -> None: + """Workload to run on a thread.""" + try: + self.handle.write(self.data) + self.handle.flush() + finally: + self.handle.close() + + +class ReaderThread(WrappedThread, metaclass=abc.ABCMeta): + """Thread to read stdout from a subprocess.""" + def __init__(self, handle: t.IO[bytes], buffer: t.BinaryIO) -> None: + super().__init__(self._run) + + self.handle = handle + self.buffer = buffer + self.lines = [] # type: t.List[bytes] + + @abc.abstractmethod + def _run(self) -> None: + """Workload to run on a thread.""" + + +class CaptureThread(ReaderThread): + """Thread to capture stdout from a subprocess into a buffer.""" + def _run(self) -> None: + """Workload to run on a thread.""" + src = self.handle + dst = self.lines + + try: + for line in src: + dst.append(line) + finally: + src.close() + + +class OutputThread(ReaderThread): + """Thread to pass stdout from a subprocess to stdout.""" + def _run(self) -> None: + """Workload to run on a thread.""" + src = self.handle + dst = self.buffer + + try: + for line in src: + dst.write(line) + dst.flush() + finally: + src.close() + + def common_environment(): """Common environment used for executing all programs.""" env = dict( @@ -404,6 +671,16 @@ def pass_vars(required, optional): # type: (t.Collection[str], t.Collection[str return env +def verified_chmod(path: str, mode: int) -> None: + """Perform chmod on the specified path and then verify the permissions were applied.""" + os.chmod(path, mode) # pylint: disable=ansible-bad-function + + executable = any(mode & perm for perm in (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH)) + + if executable 
and not os.access(path, os.X_OK): + raise ApplicationError(f'Path "{path}" should executable, but is not. Is the filesystem mounted with the "noexec" option?') + + def remove_tree(path): # type: (str) -> None """Remove the specified directory, siliently continuing if the directory does not exist.""" try: @@ -466,7 +743,6 @@ def is_binary_file(path): # type: (str) -> bool return True with open_binary_file(path) as path_fd: - # noinspection PyTypeChecker return b'\0' in path_fd.read(4096) @@ -514,7 +790,7 @@ class Display: self.color = sys.stdout.isatty() self.warnings = [] self.warnings_unique = set() - self.info_stderr = False + self.fd = sys.stderr # default to stderr until config is initialized to avoid early messages going to stdout self.rows = 0 self.columns = 0 self.truncate = 0 @@ -526,7 +802,7 @@ class Display: def __warning(self, message): # type: (str) -> None """Internal implementation for displaying a warning message.""" - self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr) + self.print_message('WARNING: %s' % message, color=self.purple) def review_warnings(self): # type: () -> None """Review all warnings which previously occurred.""" @@ -554,23 +830,27 @@ class Display: def notice(self, message): # type: (str) -> None """Display a notice level message.""" - self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr) + self.print_message('NOTICE: %s' % message, color=self.purple) def error(self, message): # type: (str) -> None """Display an error level message.""" - self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr) + self.print_message('ERROR: %s' % message, color=self.red) + + def fatal(self, message): # type: (str) -> None + """Display a fatal level message.""" + self.print_message('FATAL: %s' % message, color=self.red, stderr=True) def info(self, message, verbosity=0, truncate=False): # type: (str, int, bool) -> None """Display an info level message.""" if self.verbosity >= verbosity: 
color = self.verbosity_colors.get(verbosity, self.yellow) - self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout, truncate=truncate) + self.print_message(message, color=color, truncate=truncate) def print_message( # pylint: disable=locally-disabled, invalid-name self, message, # type: str color=None, # type: t.Optional[str] - fd=sys.stdout, # type: t.TextIO + stderr=False, # type: bool truncate=False, # type: bool ): # type: (...) -> None """Display a message.""" @@ -590,13 +870,18 @@ class Display: message = message.replace(self.clear, color) message = '%s%s%s' % (color, message, self.clear) - if sys.version_info[0] == 2: - message = to_bytes(message) + fd = sys.stderr if stderr else self.fd print(message, file=fd) fd.flush() +class InternalError(Exception): + """An unhandled internal error indicating a bug in the code.""" + def __init__(self, message: str) -> None: + super().__init__(f'An internal error has occurred in ansible-test: {message}') + + class ApplicationError(Exception): """General application error.""" @@ -649,12 +934,32 @@ class MissingEnvironmentVariable(ApplicationError): self.name = name -def retry(func, ex_type=SubprocessError, sleep=10, attempts=10): +class HostConnectionError(ApplicationError): + """ + Raised when the initial connection during host profile setup has failed and all retries have been exhausted. + Raised by provisioning code when one or more provisioning threads raise this exception. + Also raised when an SSH connection fails for the shell command. 
+ """ + def __init__(self, message: str, callback: t.Callable[[], None] = None) -> None: + super().__init__(message) + + self._callback = callback + + def run_callback(self) -> None: + """Run the error callback, if any.""" + if self._callback: + self._callback() + + +def retry(func, ex_type=SubprocessError, sleep=10, attempts=10, warn=True): """Retry the specified function on failure.""" for dummy in range(1, attempts): try: return func() - except ex_type: + except ex_type as ex: + if warn: + display.warning(str(ex)) + time.sleep(sleep) return func() @@ -771,23 +1076,10 @@ def load_module(path, name): # type: (str, str) -> None if name in sys.modules: return - if sys.version_info >= (3, 4): - import importlib.util - - spec = importlib.util.spec_from_file_location(name, path) - module = importlib.util.module_from_spec(spec) - # noinspection PyUnresolvedReferences - spec.loader.exec_module(module) - - sys.modules[name] = module - else: - # noinspection PyDeprecation - import imp # pylint: disable=deprecated-module - - # load_source (and thus load_module) require a file opened with `open` in text mode - with open(to_bytes(path)) as module_file: - # noinspection PyDeprecation - imp.load_module(name, module_file, path, ('.py', 'r', imp.PY_SOURCE)) + spec = importlib.util.spec_from_file_location(name, path) + module = importlib.util.module_from_spec(spec) + sys.modules[name] = module + spec.loader.exec_module(module) def sanitize_host_name(name): @@ -795,18 +1087,6 @@ def sanitize_host_name(name): return re.sub('[^A-Za-z0-9]+', '-', name)[:63].strip('-') -@cache -def get_host_ip(): - """Return the host's IP address.""" - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: - sock.connect(('10.255.255.255', 22)) - host_ip = get_host_ip.ip = sock.getsockname()[0] - - display.info('Detected host IP: %s' % host_ip, verbosity=1) - - return host_ip - - def get_generic_type(base_type, generic_base_type): # type: (t.Type, t.Type[TType]) -> t.Optional[t.Type[TType]] 
"""Return the generic type arg derived from the generic_base_type type that is associated with the base_type type, if any, otherwise return None.""" # noinspection PyUnresolvedReferences @@ -840,4 +1120,19 @@ def verify_sys_executable(path): # type: (str) -> t.Optional[str] return expected_executable +def type_guard(sequence: t.Sequence[t.Any], guard_type: t.Type[C]) -> TypeGuard[t.Sequence[C]]: + """ + Raises an exception if any item in the given sequence does not match the specified guard type. + Use with assert so that type checkers are aware of the type guard. + """ + invalid_types = set(type(item) for item in sequence if not isinstance(item, guard_type)) + + if not invalid_types: + return True + + invalid_type_names = sorted(str(item) for item in invalid_types) + + raise Exception(f'Sequence required to contain only {guard_type} includes: {", ".join(invalid_type_names)}') + + display = Display() # pylint: disable=locally-disabled, invalid-name diff --git a/test/lib/ansible_test/_internal/util_common.py b/test/lib/ansible_test/_internal/util_common.py index f77040b170..ecf8ae6676 100644 --- a/test/lib/ansible_test/_internal/util_common.py +++ b/test/lib/ansible_test/_internal/util_common.py @@ -28,14 +28,16 @@ from .util import ( MODE_DIRECTORY, MODE_FILE_EXECUTE, MODE_FILE, + OutputStream, PYTHON_PATHS, raw_command, ANSIBLE_TEST_DATA_ROOT, ANSIBLE_TEST_TARGET_ROOT, - ANSIBLE_TEST_TOOLS_ROOT, + ANSIBLE_TEST_TARGET_TOOLS_ROOT, ApplicationError, SubprocessError, generate_name, + verified_chmod, ) from .io import ( @@ -58,7 +60,7 @@ from .host_configs import ( VirtualPythonConfig, ) -CHECK_YAML_VERSIONS = {} +CHECK_YAML_VERSIONS = {} # type: t.Dict[str, t.Any] class ShellScriptTemplate: @@ -66,7 +68,7 @@ class ShellScriptTemplate: def __init__(self, template): # type: (t.Text) -> None self.template = template - def substitute(self, **kwargs): # type: (t.Dict[str, t.Union[str, t.List[str]]]) -> str + def substitute(self, **kwargs: t.Union[str, t.List[str]]) -> str: 
"""Return a string templated with the given arguments.""" kvp = dict((k, self.quote(v)) for k, v in kwargs.items()) pattern = re.compile(r'#{(?P<name>[^}]+)}') @@ -127,6 +129,8 @@ class CommonConfig: """Configuration common to all commands.""" def __init__(self, args, command): # type: (t.Any, str) -> None self.command = command + self.interactive = False + self.check_layout = True self.success = None # type: t.Optional[bool] self.color = args.color # type: bool @@ -136,11 +140,11 @@ class CommonConfig: self.truncate = args.truncate # type: int self.redact = args.redact # type: bool - self.info_stderr = False # type: bool + self.display_stderr = False # type: bool self.session_name = generate_name() - self.cache = {} + self.cache = {} # type: t.Dict[str, t.Any] def get_ansible_config(self): # type: () -> str """Return the path to the Ansible config for the given config.""" @@ -220,15 +224,8 @@ def process_scoped_temporary_directory(args, prefix='ansible-test-', suffix=None @contextlib.contextmanager -def named_temporary_file(args, prefix, suffix, directory, content): - """ - :param args: CommonConfig - :param prefix: str - :param suffix: str - :param directory: str - :param content: str | bytes | unicode - :rtype: str - """ +def named_temporary_file(args, prefix, suffix, directory, content): # type: (CommonConfig, str, str, t.Optional[str], str) -> t.Iterator[str] + """Context manager for a named temporary file.""" if args.explain: yield os.path.join(directory or '/tmp', '%stemp%s' % (prefix, suffix)) else: @@ -243,7 +240,7 @@ def write_json_test_results(category, # type: ResultType name, # type: str content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]] formatted=True, # type: bool - encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]] + encoder=None, # type: t.Optional[t.Type[json.JSONEncoder]] ): # type: (...) 
-> None """Write the given json content to the specified test results path, creating directories as needed.""" path = os.path.join(category.path, name) @@ -286,9 +283,9 @@ def get_injector_path(): # type: () -> str script = set_shebang(script, shebang) write_text_file(dst, script) - os.chmod(dst, mode) + verified_chmod(dst, mode) - os.chmod(injector_path, MODE_DIRECTORY) + verified_chmod(injector_path, MODE_DIRECTORY) def cleanup_injector(): """Remove the temporary injector directory.""" @@ -349,7 +346,7 @@ def get_python_path(interpreter): # type: (str) -> str create_interpreter_wrapper(interpreter, injected_interpreter) - os.chmod(python_path, MODE_DIRECTORY) + verified_chmod(python_path, MODE_DIRECTORY) if not PYTHON_PATHS: atexit.register(cleanup_python_paths) @@ -387,7 +384,7 @@ def create_interpreter_wrapper(interpreter, injected_interpreter): # type: (str write_text_file(injected_interpreter, code) - os.chmod(injected_interpreter, MODE_FILE_EXECUTE) + verified_chmod(injected_interpreter, MODE_FILE_EXECUTE) def cleanup_python_paths(): @@ -402,7 +399,7 @@ def intercept_python( python, # type: PythonConfig cmd, # type: t.List[str] env, # type: t.Dict[str, str] - capture=False, # type: bool + capture, # type: bool data=None, # type: t.Optional[str] cwd=None, # type: t.Optional[str] always=False, # type: bool @@ -432,26 +429,28 @@ def intercept_python( def run_command( args, # type: CommonConfig cmd, # type: t.Iterable[str] - capture=False, # type: bool + capture, # type: bool env=None, # type: t.Optional[t.Dict[str, str]] data=None, # type: t.Optional[str] cwd=None, # type: t.Optional[str] always=False, # type: bool - stdin=None, # type: t.Optional[t.BinaryIO] - stdout=None, # type: t.Optional[t.BinaryIO] + stdin=None, # type: t.Optional[t.IO[bytes]] + stdout=None, # type: t.Optional[t.IO[bytes]] + interactive=False, # type: bool + output_stream=None, # type: t.Optional[OutputStream] cmd_verbosity=1, # type: int str_errors='strict', # type: str 
error_callback=None, # type: t.Optional[t.Callable[[SubprocessError], None]] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified command and return stdout and stderr as a tuple.""" explain = args.explain and not always - return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout, - cmd_verbosity=cmd_verbosity, str_errors=str_errors, error_callback=error_callback) + return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout, interactive=interactive, + output_stream=output_stream, cmd_verbosity=cmd_verbosity, str_errors=str_errors, error_callback=error_callback) def yamlcheck(python): """Return True if PyYAML has libyaml support, False if it does not and None if it was not found.""" - result = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yamlcheck.py')], capture=True)[0]) + result = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'yamlcheck.py')], capture=True)[0]) if not result['yaml']: return None diff --git a/test/lib/ansible_test/_internal/venv.py b/test/lib/ansible_test/_internal/venv.py index cf436775bd..a50f9b54e4 100644 --- a/test/lib/ansible_test/_internal/venv.py +++ b/test/lib/ansible_test/_internal/venv.py @@ -15,11 +15,12 @@ from .util import ( find_python, SubprocessError, get_available_python_versions, - ANSIBLE_TEST_TOOLS_ROOT, + ANSIBLE_TEST_TARGET_TOOLS_ROOT, display, remove_tree, ApplicationError, str_to_version, + raw_command, ) from .util_common import ( @@ -92,7 +93,7 @@ def create_virtual_environment(args, # type: EnvironmentConfig # creating a virtual environment using 'venv' when running in a virtual environment created by 'virtualenv' results # in a copy of the original virtual environment instead of creation of a new one # avoid this issue by only using "real" python interpreters to invoke 'venv' - for real_python in 
iterate_real_pythons(args, python.version): + for real_python in iterate_real_pythons(python.version): if run_venv(args, real_python, system_site_packages, pip, path): display.info('Created Python %s virtual environment using "venv": %s' % (python.version, path), verbosity=1) return True @@ -132,7 +133,7 @@ def create_virtual_environment(args, # type: EnvironmentConfig return False -def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t.Iterable[str] +def iterate_real_pythons(version): # type: (str) -> t.Iterable[str] """ Iterate through available real python interpreters of the requested version. The current interpreter will be checked and then the path will be searched. @@ -142,7 +143,7 @@ def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t. if version_info == sys.version_info[:len(version_info)]: current_python = sys.executable - real_prefix = get_python_real_prefix(args, current_python) + real_prefix = get_python_real_prefix(current_python) if real_prefix: current_python = find_python(version, os.path.join(real_prefix, 'bin')) @@ -163,7 +164,7 @@ def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t. if found_python == current_python: return - real_prefix = get_python_real_prefix(args, found_python) + real_prefix = get_python_real_prefix(found_python) if real_prefix: found_python = find_python(version, os.path.join(real_prefix, 'bin')) @@ -172,12 +173,12 @@ def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t. yield found_python -def get_python_real_prefix(args, python_path): # type: (EnvironmentConfig, str) -> t.Optional[str] +def get_python_real_prefix(python_path): # type: (str) -> t.Optional[str] """ Return the real prefix of the specified interpreter or None if the interpreter is not a virtual environment created by 'virtualenv'. 
""" - cmd = [python_path, os.path.join(os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'virtualenvcheck.py'))] - check_result = json.loads(run_command(args, cmd, capture=True, always=True)[0]) + cmd = [python_path, os.path.join(os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'virtualenvcheck.py'))] + check_result = json.loads(raw_command(cmd, capture=True)[0]) real_prefix = check_result['real_prefix'] return real_prefix @@ -205,7 +206,7 @@ def run_venv(args, # type: EnvironmentConfig remove_tree(path) if args.verbosity > 1: - display.error(ex) + display.error(ex.message) return False @@ -241,7 +242,7 @@ def run_virtualenv(args, # type: EnvironmentConfig remove_tree(path) if args.verbosity > 1: - display.error(ex) + display.error(ex.message) return False @@ -249,11 +250,11 @@ def run_virtualenv(args, # type: EnvironmentConfig def get_virtualenv_version(args, python): # type: (EnvironmentConfig, str) -> t.Optional[t.Tuple[int, ...]] - """Get the virtualenv version for the given python intepreter, if available, otherwise return None.""" + """Get the virtualenv version for the given python interpreter, if available, otherwise return None.""" try: - cache = get_virtualenv_version.cache + cache = get_virtualenv_version.cache # type: ignore[attr-defined] except AttributeError: - cache = get_virtualenv_version.cache = {} + cache = get_virtualenv_version.cache = {} # type: ignore[attr-defined] if python not in cache: try: @@ -262,7 +263,7 @@ def get_virtualenv_version(args, python): # type: (EnvironmentConfig, str) -> t stdout = '' if args.verbosity > 1: - display.error(ex) + display.error(ex.message) version = None |