Diffstat (limited to 'test/lib/ansible_test/_internal')
-rw-r--r--  test/lib/ansible_test/_internal/ansible_util.py                |  13
-rw-r--r--  test/lib/ansible_test/_internal/ci/azp.py                      |   1
-rw-r--r--  test/lib/ansible_test/_internal/cli.py                         |  47
-rw-r--r--  test/lib/ansible_test/_internal/cloud/__init__.py              |  43
-rw-r--r--  test/lib/ansible_test/_internal/cloud/acme.py                  | 149
-rw-r--r--  test/lib/ansible_test/_internal/cloud/aws.py                   |  11
-rw-r--r--  test/lib/ansible_test/_internal/cloud/azure.py                 |   5
-rw-r--r--  test/lib/ansible_test/_internal/cloud/cloudscale.py            |  11
-rw-r--r--  test/lib/ansible_test/_internal/cloud/cs.py                    | 205
-rw-r--r--  test/lib/ansible_test/_internal/cloud/foreman.py               | 121
-rw-r--r--  test/lib/ansible_test/_internal/cloud/galaxy.py                | 129
-rw-r--r--  test/lib/ansible_test/_internal/cloud/gcp.py                   |  16
-rw-r--r--  test/lib/ansible_test/_internal/cloud/hcloud.py                |   7
-rw-r--r--  test/lib/ansible_test/_internal/cloud/httptester.py            |  92
-rw-r--r--  test/lib/ansible_test/_internal/cloud/nios.py                  | 122
-rw-r--r--  test/lib/ansible_test/_internal/cloud/opennebula.py            |   6
-rw-r--r--  test/lib/ansible_test/_internal/cloud/openshift.py             | 176
-rw-r--r--  test/lib/ansible_test/_internal/cloud/scaleway.py              |  10
-rw-r--r--  test/lib/ansible_test/_internal/cloud/vcenter.py               | 143
-rw-r--r--  test/lib/ansible_test/_internal/cloud/vultr.py                 |  11
-rw-r--r--  test/lib/ansible_test/_internal/config.py                      |  12
-rw-r--r--  test/lib/ansible_test/_internal/containers.py                  | 755
-rw-r--r--  test/lib/ansible_test/_internal/core_ci.py                     |   3
-rw-r--r--  test/lib/ansible_test/_internal/delegation.py                  | 175
-rw-r--r--  test/lib/ansible_test/_internal/docker_util.py                 | 331
-rw-r--r--  test/lib/ansible_test/_internal/env.py                         |  10
-rw-r--r--  test/lib/ansible_test/_internal/executor.py                    | 398
-rw-r--r--  test/lib/ansible_test/_internal/sanity/integration_aliases.py  |  11
-rw-r--r--  test/lib/ansible_test/_internal/ssh.py                         | 264
-rw-r--r--  test/lib/ansible_test/_internal/target.py                      |   3
-rw-r--r--  test/lib/ansible_test/_internal/util.py                        |  57
-rw-r--r--  test/lib/ansible_test/_internal/util_common.py                 |   2
32 files changed, 1830 insertions(+), 1509 deletions(-)
diff --git a/test/lib/ansible_test/_internal/ansible_util.py b/test/lib/ansible_test/_internal/ansible_util.py
index 339eff6908..a949f903b3 100644
--- a/test/lib/ansible_test/_internal/ansible_util.py
+++ b/test/lib/ansible_test/_internal/ansible_util.py
@@ -31,6 +31,7 @@ from .util_common import (
create_temp_dir,
run_command,
ResultType,
+ intercept_command,
)
from .config import (
@@ -295,3 +296,15 @@ def get_collection_detail(args, python): # type: (EnvironmentConfig, str) -> Co
detail.version = str(version) if version is not None else None
return detail
+
+
+def run_playbook(args, inventory_path, playbook, run_playbook_vars): # type: (CommonConfig, str, str, t.Dict[str, t.Any]) -> None
+ """Run the specified playbook using the given inventory file and playbook variables."""
+ playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
+ command = ['ansible-playbook', '-i', inventory_path, playbook_path, '-e', json.dumps(run_playbook_vars)]
+
+ if args.verbosity:
+ command.append('-%s' % ('v' * args.verbosity))
+
+ env = ansible_environment(args)
+ intercept_command(args, command, '', env, disable_coverage=True)
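
The new run_playbook() helper above wraps ansible-playbook in the ansible-test environment, with coverage interception disabled since such playbook runs are setup steps rather than code under test. A minimal usage sketch, assuming an existing inventory file (the playbook name and extra vars below are hypothetical):

    # Hypothetical call; 'example.yml' stands in for a playbook shipped under
    # ANSIBLE_TEST_DATA_ROOT/playbooks, and the extra vars are illustrative.
    run_playbook(args, '/tmp/inventory.ini', 'example.yml', dict(example_var='value'))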
diff --git a/test/lib/ansible_test/_internal/ci/azp.py b/test/lib/ansible_test/_internal/ci/azp.py
index 807c115bf1..d4a30c1d14 100644
--- a/test/lib/ansible_test/_internal/ci/azp.py
+++ b/test/lib/ansible_test/_internal/ci/azp.py
@@ -3,7 +3,6 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
-import re
import tempfile
import uuid
diff --git a/test/lib/ansible_test/_internal/cli.py b/test/lib/ansible_test/_internal/cli.py
index 15a235180b..fc360b3067 100644
--- a/test/lib/ansible_test/_internal/cli.py
+++ b/test/lib/ansible_test/_internal/cli.py
@@ -176,7 +176,7 @@ def main():
delegate_args = None
except Delegate as ex:
# save delegation args for use once we exit the exception handler
- delegate_args = (ex.exclude, ex.require, ex.integration_targets)
+ delegate_args = (ex.exclude, ex.require)
if delegate_args:
# noinspection PyTypeChecker
@@ -324,7 +324,7 @@ def parse_args():
help='base branch used for change detection')
add_changes(test, argparse)
- add_environments(test)
+ add_environments(test, argparse)
integration = argparse.ArgumentParser(add_help=False, parents=[test])
@@ -423,7 +423,6 @@ def parse_args():
config=PosixIntegrationConfig)
add_extra_docker_options(posix_integration)
- add_httptester_options(posix_integration, argparse)
network_integration = subparsers.add_parser('network-integration',
parents=[integration],
@@ -469,7 +468,6 @@ def parse_args():
config=WindowsIntegrationConfig)
add_extra_docker_options(windows_integration, integration=False)
- add_httptester_options(windows_integration, argparse)
windows_integration.add_argument('--windows',
metavar='VERSION',
@@ -564,13 +562,12 @@ def parse_args():
action='store_true',
help='direct to shell with no setup')
- add_environments(shell)
+ add_environments(shell, argparse)
add_extra_docker_options(shell)
- add_httptester_options(shell, argparse)
coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])
- add_environments(coverage_common, isolated_delegation=False)
+ add_environments(coverage_common, argparse, isolated_delegation=False)
coverage = subparsers.add_parser('coverage',
help='code coverage management and reporting')
@@ -896,9 +893,10 @@ def add_changes(parser, argparse):
changes.add_argument('--changed-path', metavar='PATH', action='append', help=argparse.SUPPRESS)
-def add_environments(parser, isolated_delegation=True):
+def add_environments(parser, argparse, isolated_delegation=True):
"""
:type parser: argparse.ArgumentParser
+ :type argparse: argparse
:type isolated_delegation: bool
"""
parser.add_argument('--requirements',
@@ -934,6 +932,7 @@ def add_environments(parser, isolated_delegation=True):
if not isolated_delegation:
environments.set_defaults(
+ containers=None,
docker=None,
remote=None,
remote_stage=None,
@@ -945,6 +944,9 @@ def add_environments(parser, isolated_delegation=True):
return
+ parser.add_argument('--containers',
+ help=argparse.SUPPRESS) # internal use only
+
environments.add_argument('--docker',
metavar='IMAGE',
nargs='?',
@@ -1001,32 +1003,6 @@ def add_extra_coverage_options(parser):
help='generate empty report of all python/powershell source files')
-def add_httptester_options(parser, argparse):
- """
- :type parser: argparse.ArgumentParser
- :type argparse: argparse
- """
- group = parser.add_mutually_exclusive_group()
-
- group.add_argument('--httptester',
- metavar='IMAGE',
- default='quay.io/ansible/http-test-container:1.3.0',
- help='docker image to use for the httptester container')
-
- group.add_argument('--disable-httptester',
- dest='httptester',
- action='store_const',
- const='',
- help='do not use the httptester container')
-
- parser.add_argument('--inject-httptester',
- action='store_true',
- help=argparse.SUPPRESS) # internal use only
-
- parser.add_argument('--httptester-krb5-password',
- help=argparse.SUPPRESS) # internal use only
-
-
def add_extra_docker_options(parser, integration=True):
"""
:type parser: argparse.ArgumentParser
@@ -1119,9 +1095,8 @@ def complete_remote_shell(prefix, parsed_args, **_):
images = sorted(get_remote_completion().keys())
- # 2008 doesn't support SSH so we do not add to the list of valid images
windows_completion_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', 'windows.txt')
- images.extend(["windows/%s" % i for i in read_lines_without_comments(windows_completion_path, remove_blank_lines=True) if i != '2008'])
+ images.extend(["windows/%s" % i for i in read_lines_without_comments(windows_completion_path, remove_blank_lines=True)])
return [i for i in images if i.startswith(prefix)]
diff --git a/test/lib/ansible_test/_internal/cloud/__init__.py b/test/lib/ansible_test/_internal/cloud/__init__.py
index 08a1183e06..23463330d3 100644
--- a/test/lib/ansible_test/_internal/cloud/__init__.py
+++ b/test/lib/ansible_test/_internal/cloud/__init__.py
@@ -50,6 +50,10 @@ from ..data import (
data_context,
)
+from ..docker_util import (
+ get_docker_command,
+)
+
PROVIDERS = {}
ENVIRONMENTS = {}
@@ -197,6 +201,9 @@ class CloudBase(ABC):
def config_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
"""Add the config file to the payload file list."""
+ if self.platform not in self.args.metadata.cloud_config:
+ return # platform was initialized, but not used -- such as being skipped due to all tests being disabled
+
if self._get_cloud_config(self._CONFIG_PATH, ''):
pair = (self.config_path, os.path.relpath(self.config_path, data_context().content.root))
@@ -297,18 +304,38 @@ class CloudProvider(CloudBase):
self.config_template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, '%s.template' % self.config_static_name)
self.config_extension = config_extension
+ self.uses_config = False
+ self.uses_docker = False
+
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
+ if not self.uses_docker and not self.uses_config:
+ return
+
+ if self.uses_docker and get_docker_command():
+ return
+
+ if self.uses_config and os.path.exists(self.config_static_path):
+ return
+
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
- display.warning('Excluding tests marked "%s" which require config (see "%s"): %s'
- % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ if not self.uses_docker and self.uses_config:
+ display.warning('Excluding tests marked "%s" which require config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+ elif self.uses_docker and not self.uses_config:
+ display.warning('Excluding tests marked "%s" which requires container support: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+ elif self.uses_docker and self.uses_config:
+ display.warning('Excluding tests marked "%s" which requires container support or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
@@ -317,18 +344,6 @@ class CloudProvider(CloudBase):
atexit.register(self.cleanup)
- def get_remote_ssh_options(self):
- """Get any additional options needed when delegating tests to a remote instance via SSH.
- :rtype: list[str]
- """
- return []
-
- def get_docker_run_options(self):
- """Get any additional options needed when delegating tests to a docker container.
- :rtype: list[str]
- """
- return []
-
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.remove_config:
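
With the per-plugin filter() implementations being removed throughout this diff, a provider now just declares what it needs and the shared CloudProvider.filter() above decides whether to skip targets. A sketch of the opt-in, assuming the base class shown in this hunk (the subclass name is hypothetical):

    class ExampleCloudProvider(CloudProvider):
        # Hypothetical provider illustrating the new uses_docker/uses_config flags.
        def __init__(self, args):
            super(ExampleCloudProvider, self).__init__(args)

            self.uses_docker = True   # skip targets when no usable docker command is found
            self.uses_config = False  # no static config file is required

    # If both flags are set, either an available docker command or an existing static
    # config file keeps the targets enabled; with neither flag set, filter() is a no-op.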
diff --git a/test/lib/ansible_test/_internal/cloud/acme.py b/test/lib/ansible_test/_internal/cloud/acme.py
index 3d0ace24e7..ae92dfa9ff 100644
--- a/test/lib/ansible_test/_internal/cloud/acme.py
+++ b/test/lib/ansible_test/_internal/cloud/acme.py
@@ -3,7 +3,6 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
-import time
from . import (
CloudProvider,
@@ -11,27 +10,8 @@ from . import (
CloudEnvironmentConfig,
)
-from ..util import (
- find_executable,
- display,
- ApplicationError,
- SubprocessError,
-)
-
-from ..http import (
- HttpClient,
-)
-
-from ..docker_util import (
- docker_run,
- docker_rm,
- docker_inspect,
- docker_pull,
- get_docker_container_id,
- get_docker_hostname,
- get_docker_container_ip,
- get_docker_preferred_network_name,
- is_docker_user_defined_network,
+from ..containers import (
+ run_support_container,
)
@@ -50,46 +30,8 @@ class ACMEProvider(CloudProvider):
self.image = os.environ.get('ANSIBLE_ACME_CONTAINER')
else:
self.image = 'quay.io/ansible/acme-test-container:2.0.0'
- self.container_name = ''
-
- def _wait_for_service(self, protocol, acme_host, port, local_part, name):
- """Wait for an endpoint to accept connections."""
- if self.args.explain:
- return
-
- client = HttpClient(self.args, always=True, insecure=True)
- endpoint = '%s://%s:%d/%s' % (protocol, acme_host, port, local_part)
-
- for dummy in range(1, 30):
- display.info('Waiting for %s: %s' % (name, endpoint), verbosity=1)
-
- try:
- client.get(endpoint)
- return
- except SubprocessError:
- pass
- time.sleep(1)
-
- raise ApplicationError('Timeout waiting for %s.' % name)
-
- def filter(self, targets, exclude):
- """Filter out the cloud tests when the necessary config and resources are not available.
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- docker = find_executable('docker', required=False)
-
- if docker:
- return
-
- skip = 'cloud/%s/' % self.platform
- skipped = [target.name for target in targets if skip in target.aliases]
-
- if skipped:
- exclude.append(skip)
- display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
- % (skip.rstrip('/'), ', '.join(skipped)))
+ self.uses_docker = True
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
@@ -100,79 +42,26 @@ class ACMEProvider(CloudProvider):
else:
self._setup_dynamic()
- def get_docker_run_options(self):
- """Get any additional options needed when delegating tests to a docker container.
- :rtype: list[str]
- """
- network = get_docker_preferred_network_name(self.args)
-
- if self.managed and not is_docker_user_defined_network(network):
- return ['--link', self.DOCKER_SIMULATOR_NAME]
-
- return []
-
- def cleanup(self):
- """Clean up the cloud resource and any temporary configuration files after tests complete."""
- if self.container_name:
- docker_rm(self.args, self.container_name)
-
- super(ACMEProvider, self).cleanup()
-
def _setup_dynamic(self):
"""Create a ACME test container using docker."""
- container_id = get_docker_container_id()
-
- self.container_name = self.DOCKER_SIMULATOR_NAME
-
- results = docker_inspect(self.args, self.container_name)
-
- if results and not results[0].get('State', {}).get('Running'):
- docker_rm(self.args, self.container_name)
- results = []
-
- if results:
- display.info('Using the existing ACME docker test container.', verbosity=1)
- else:
- display.info('Starting a new ACME docker test container.', verbosity=1)
-
- if not container_id:
- # publish the simulator ports when not running inside docker
- publish_ports = [
- '-p', '5000:5000', # control port for flask app in container
- '-p', '14000:14000', # Pebble ACME CA
- ]
- else:
- publish_ports = []
-
- if not os.environ.get('ANSIBLE_ACME_CONTAINER'):
- docker_pull(self.args, self.image)
-
- docker_run(
- self.args,
- self.image,
- ['-d', '--name', self.container_name] + publish_ports,
- )
-
- if self.args.docker:
- acme_host = self.DOCKER_SIMULATOR_NAME
- elif container_id:
- acme_host = self._get_simulator_address()
- display.info('Found ACME test container address: %s' % acme_host, verbosity=1)
- else:
- acme_host = get_docker_hostname()
-
- if container_id:
- acme_host_ip = self._get_simulator_address()
- else:
- acme_host_ip = get_docker_hostname()
-
- self._set_cloud_config('acme_host', acme_host)
+ ports = [
+ 5000, # control port for flask app in container
+ 14000, # Pebble ACME CA
+ ]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=True,
+ )
- self._wait_for_service('http', acme_host_ip, 5000, '', 'ACME controller')
- self._wait_for_service('https', acme_host_ip, 14000, 'dir', 'ACME CA endpoint')
+ descriptor.register(self.args)
- def _get_simulator_address(self):
- return get_docker_container_ip(self.args, self.container_name)
+ self._set_cloud_config('acme_host', self.DOCKER_SIMULATOR_NAME)
def _setup_static(self):
raise NotImplementedError()
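
All of the converted providers now delegate container lifecycle to run_support_container() from the new containers.py (added in this commit but not shown in this excerpt). A signature sketch inferred from the call sites in this diff; parameter names beyond the positional ones are taken from the keyword arguments used here and may not match the actual definition exactly:

    def run_support_container(
            args,                  # EnvironmentConfig
            context,               # str: platform the container belongs to ('context' is an assumed name)
            image,                 # str: docker image to run
            name,                  # str: container name, reachable from test targets
            ports,                 # list[int]: ports to expose
            aliases=None,          # list[str]: additional network aliases
            start=True,            # bool: start the container immediately
            allow_existing=False,  # bool: reuse a matching container that is already running
            cleanup=None,          # bool or None: remove the container on exit
            cmd=None,              # list[str]: command to run in the container
            env=None,              # dict[str, str]: environment variables for the container
    ):
        # Returns a descriptor exposing register(), start(), running and container_id,
        # as used by the providers in this diff.
        raise NotImplementedError()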
diff --git a/test/lib/ansible_test/_internal/cloud/aws.py b/test/lib/ansible_test/_internal/cloud/aws.py
index 1ff8977561..a8b812dadf 100644
--- a/test/lib/ansible_test/_internal/cloud/aws.py
+++ b/test/lib/ansible_test/_internal/cloud/aws.py
@@ -23,14 +23,19 @@ from ..core_ci import (
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(AwsCloudProvider, self).__init__(args)
+
+ self.uses_config = True
+
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
- if os.path.isfile(self.config_static_path):
- return
-
aci = self._create_ansible_core_ci()
if aci.available:
diff --git a/test/lib/ansible_test/_internal/cloud/azure.py b/test/lib/ansible_test/_internal/cloud/azure.py
index 2efe96f8eb..b7eb5dfac6 100644
--- a/test/lib/ansible_test/_internal/cloud/azure.py
+++ b/test/lib/ansible_test/_internal/cloud/azure.py
@@ -44,14 +44,13 @@ class AzureCloudProvider(CloudProvider):
self.aci = None
+ self.uses_config = True
+
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
- if os.path.isfile(self.config_static_path):
- return
-
aci = self._create_ansible_core_ci()
if aci.available:
diff --git a/test/lib/ansible_test/_internal/cloud/cloudscale.py b/test/lib/ansible_test/_internal/cloud/cloudscale.py
index 8e5885b2d5..1d3ef5b86c 100644
--- a/test/lib/ansible_test/_internal/cloud/cloudscale.py
+++ b/test/lib/ansible_test/_internal/cloud/cloudscale.py
@@ -22,22 +22,13 @@ class CloudscaleCloudProvider(CloudProvider):
"""Cloudscale cloud provider plugin. Sets up cloud resources before
delegation.
"""
-
def __init__(self, args):
"""
:type args: TestConfig
"""
super(CloudscaleCloudProvider, self).__init__(args)
- def filter(self, targets, exclude):
- """Filter out the cloud tests when the necessary config and resources are not available.
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- if os.path.isfile(self.config_static_path):
- return
-
- super(CloudscaleCloudProvider, self).filter(targets, exclude)
+ self.uses_config = True
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
diff --git a/test/lib/ansible_test/_internal/cloud/cs.py b/test/lib/ansible_test/_internal/cloud/cs.py
index 1f30b984e9..88ee1340fc 100644
--- a/test/lib/ansible_test/_internal/cloud/cs.py
+++ b/test/lib/ansible_test/_internal/cloud/cs.py
@@ -4,8 +4,6 @@ __metaclass__ = type
import json
import os
-import re
-import time
from . import (
CloudProvider,
@@ -14,30 +12,22 @@ from . import (
)
from ..util import (
- find_executable,
ApplicationError,
display,
- SubprocessError,
ConfigParser,
)
from ..http import (
- HttpClient,
- HttpError,
urlparse,
)
from ..docker_util import (
- docker_run,
- docker_rm,
- docker_inspect,
- docker_pull,
- docker_network_inspect,
docker_exec,
- get_docker_container_id,
- get_docker_preferred_network_name,
- get_docker_hostname,
- is_docker_user_defined_network,
+)
+
+from ..containers import (
+ run_support_container,
+ wait_for_file,
)
@@ -52,31 +42,11 @@ class CsCloudProvider(CloudProvider):
super(CsCloudProvider, self).__init__(args)
self.image = os.environ.get('ANSIBLE_CLOUDSTACK_CONTAINER', 'quay.io/ansible/cloudstack-test-container:1.4.0')
- self.container_name = ''
- self.endpoint = ''
self.host = ''
self.port = 0
- def filter(self, targets, exclude):
- """Filter out the cloud tests when the necessary config and resources are not available.
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- if os.path.isfile(self.config_static_path):
- return
-
- docker = find_executable('docker', required=False)
-
- if docker:
- return
-
- skip = 'cloud/%s/' % self.platform
- skipped = [target.name for target in targets if skip in target.aliases]
-
- if skipped:
- exclude.append(skip)
- display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
- % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+ self.uses_docker = True
+ self.uses_config = True
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
@@ -87,49 +57,19 @@ class CsCloudProvider(CloudProvider):
else:
self._setup_dynamic()
- def get_remote_ssh_options(self):
- """Get any additional options needed when delegating tests to a remote instance via SSH.
- :rtype: list[str]
- """
- if self.managed:
- return ['-R', '8888:%s:8888' % get_docker_hostname()]
-
- return []
-
- def get_docker_run_options(self):
- """Get any additional options needed when delegating tests to a docker container.
- :rtype: list[str]
- """
- network = get_docker_preferred_network_name(self.args)
-
- if self.managed and not is_docker_user_defined_network(network):
- return ['--link', self.DOCKER_SIMULATOR_NAME]
-
- return []
-
- def cleanup(self):
- """Clean up the cloud resource and any temporary configuration files after tests complete."""
- if self.container_name:
- if self.ci_provider.code:
- docker_rm(self.args, self.container_name)
- elif not self.args.explain:
- display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name)
-
- super(CsCloudProvider, self).cleanup()
-
def _setup_static(self):
"""Configure CloudStack tests for use with static configuration."""
parser = ConfigParser()
parser.read(self.config_static_path)
- self.endpoint = parser.get('cloudstack', 'endpoint')
+ endpoint = parser.get('cloudstack', 'endpoint')
- parts = urlparse(self.endpoint)
+ parts = urlparse(endpoint)
self.host = parts.hostname
if not self.host:
- raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)
+ raise ApplicationError('Could not determine host from endpoint: %s' % endpoint)
if parts.port:
self.port = parts.port
@@ -138,50 +78,35 @@ class CsCloudProvider(CloudProvider):
elif parts.scheme == 'https':
self.port = 443
else:
- raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)
+ raise ApplicationError('Could not determine port from endpoint: %s' % endpoint)
display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
- self._wait_for_service()
-
def _setup_dynamic(self):
"""Create a CloudStack simulator using docker."""
config = self._read_config_template()
- self.container_name = self.DOCKER_SIMULATOR_NAME
-
- results = docker_inspect(self.args, self.container_name)
-
- if results and not results[0]['State']['Running']:
- docker_rm(self.args, self.container_name)
- results = []
-
- if results:
- display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
- else:
- display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
- docker_pull(self.args, self.image)
- docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])
-
- # apply work-around for OverlayFS issue
- # https://github.com/docker/for-linux/issues/72#issuecomment-319904698
- docker_exec(self.args, self.container_name, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'])
-
- if not self.args.explain:
- display.notice('The CloudStack simulator will probably be ready in 2 - 4 minutes.')
-
- container_id = get_docker_container_id()
+ self.port = 8888
- if container_id:
- self.host = self._get_simulator_address()
- display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
- else:
- self.host = get_docker_hostname()
+ ports = [
+ self.port,
+ ]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=True,
+ )
- self.port = 8888
- self.endpoint = 'http://%s:%d' % (self.host, self.port)
+ descriptor.register(self.args)
- self._wait_for_service()
+ # apply work-around for OverlayFS issue
+ # https://github.com/docker/for-linux/issues/72#issuecomment-319904698
+ docker_exec(self.args, self.DOCKER_SIMULATOR_NAME, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'])
if self.args.explain:
values = dict(
@@ -189,17 +114,10 @@ class CsCloudProvider(CloudProvider):
PORT=str(self.port),
)
else:
- credentials = self._get_credentials()
-
- if self.args.docker:
- host = self.DOCKER_SIMULATOR_NAME
- elif self.args.remote:
- host = 'localhost'
- else:
- host = self.host
+ credentials = self._get_credentials(self.DOCKER_SIMULATOR_NAME)
values = dict(
- HOST=host,
+ HOST=self.DOCKER_SIMULATOR_NAME,
PORT=str(self.port),
KEY=credentials['apikey'],
SECRET=credentials['secretkey'],
@@ -211,62 +129,23 @@ class CsCloudProvider(CloudProvider):
self._write_config(config)
- def _get_simulator_address(self):
- current_network = get_docker_preferred_network_name(self.args)
- networks = docker_network_inspect(self.args, current_network)
-
- try:
- network = [network for network in networks if network['Name'] == current_network][0]
- containers = network['Containers']
- container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0]
- return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
- except Exception:
- display.error('Failed to process the following docker network inspect output:\n%s' %
- json.dumps(networks, indent=4, sort_keys=True))
- raise
-
- def _wait_for_service(self):
- """Wait for the CloudStack service endpoint to accept connections."""
- if self.args.explain:
- return
-
- client = HttpClient(self.args, always=True)
- endpoint = self.endpoint
-
- for _iteration in range(1, 30):
- display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)
-
- try:
- client.get(endpoint)
- return
- except SubprocessError:
- pass
-
- time.sleep(10)
-
- raise ApplicationError('Timeout waiting for CloudStack service.')
-
- def _get_credentials(self):
+ def _get_credentials(self, container_name):
"""Wait for the CloudStack simulator to return credentials.
+ :type container_name: str
:rtype: dict[str, str]
"""
- client = HttpClient(self.args, always=True)
- endpoint = '%s/admin.json' % self.endpoint
-
- for _iteration in range(1, 30):
- display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1)
-
- response = client.get(endpoint)
+ def check(value):
+ # noinspection PyBroadException
+ try:
+ json.loads(value)
+ except Exception: # pylint: disable=broad-except
+ return False # sometimes the file exists but is not yet valid JSON
- if response.status_code == 200:
- try:
- return response.json()
- except HttpError as ex:
- display.error(ex)
+ return True
- time.sleep(10)
+ stdout = wait_for_file(self.args, container_name, '/var/www/html/admin.json', sleep=10, tries=30, check=check)
- raise ApplicationError('Timeout waiting for CloudStack credentials.')
+ return json.loads(stdout)
class CsCloudEnvironment(CloudEnvironment):
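
The credential polling above now goes through wait_for_file() (also from the new containers.py), which repeatedly reads a file inside the container until the optional check callback accepts its contents. A self-contained sketch of the same JSON-readiness pattern; the container name is hypothetical:

    import json

    def json_ready(value):
        # Accept the file contents only once they parse as valid JSON.
        try:
            json.loads(value)
        except Exception:
            return False
        return True

    stdout = wait_for_file(args, 'example-simulator', '/var/www/html/admin.json',
                           sleep=10, tries=30, check=json_ready)
    credentials = json.loads(stdout)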
diff --git a/test/lib/ansible_test/_internal/cloud/foreman.py b/test/lib/ansible_test/_internal/cloud/foreman.py
index 7517f1f618..4d388962ab 100644
--- a/test/lib/ansible_test/_internal/cloud/foreman.py
+++ b/test/lib/ansible_test/_internal/cloud/foreman.py
@@ -10,21 +10,8 @@ from . import (
CloudEnvironmentConfig,
)
-from ..util import (
- find_executable,
- display,
-)
-
-from ..docker_util import (
- docker_run,
- docker_rm,
- docker_inspect,
- docker_pull,
- get_docker_container_id,
- get_docker_hostname,
- get_docker_container_ip,
- get_docker_preferred_network_name,
- is_docker_user_defined_network,
+from ..containers import (
+ run_support_container,
)
@@ -61,30 +48,8 @@ class ForemanProvider(CloudProvider):
"""
self.image = self.__container_from_env or self.DOCKER_IMAGE
- self.container_name = ''
-
- def filter(self, targets, exclude):
- """Filter out the tests with the necessary config and res unavailable.
-
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- docker_cmd = 'docker'
- docker = find_executable(docker_cmd, required=False)
- if docker:
- return
-
- skip = 'cloud/%s/' % self.platform
- skipped = [target.name for target in targets if skip in target.aliases]
-
- if skipped:
- exclude.append(skip)
- display.warning(
- 'Excluding tests marked "%s" '
- 'which require the "%s" command: %s'
- % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
- )
+ self.uses_docker = True
def setup(self):
"""Setup cloud resource before delegation and reg cleanup callback."""
@@ -95,81 +60,31 @@ class ForemanProvider(CloudProvider):
else:
self._setup_dynamic()
- def get_docker_run_options(self):
- """Get additional options needed when delegating tests to a container.
-
- :rtype: list[str]
- """
- network = get_docker_preferred_network_name(self.args)
-
- if self.managed and not is_docker_user_defined_network(network):
- return ['--link', self.DOCKER_SIMULATOR_NAME]
-
- return []
-
- def cleanup(self):
- """Clean up the resource and temporary configs files after tests."""
- if self.container_name:
- docker_rm(self.args, self.container_name)
-
- super(ForemanProvider, self).cleanup()
-
def _setup_dynamic(self):
"""Spawn a Foreman stub within docker container."""
foreman_port = 8080
- container_id = get_docker_container_id()
-
- self.container_name = self.DOCKER_SIMULATOR_NAME
- results = docker_inspect(self.args, self.container_name)
-
- if results and not results[0].get('State', {}).get('Running'):
- docker_rm(self.args, self.container_name)
- results = []
-
- display.info(
- '%s Foreman simulator docker container.'
- % ('Using the existing' if results else 'Starting a new'),
- verbosity=1,
+ ports = [
+ foreman_port,
+ ]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=True,
)
- if not results:
- if self.args.docker or container_id:
- publish_ports = []
- else:
- # publish the simulator ports when not running inside docker
- publish_ports = [
- '-p', ':'.join((str(foreman_port), ) * 2),
- ]
-
- if not self.__container_from_env:
- docker_pull(self.args, self.image)
-
- docker_run(
- self.args,
- self.image,
- ['-d', '--name', self.container_name] + publish_ports,
- )
-
- if self.args.docker:
- foreman_host = self.DOCKER_SIMULATOR_NAME
- elif container_id:
- foreman_host = self._get_simulator_address()
- display.info(
- 'Found Foreman simulator container address: %s'
- % foreman_host, verbosity=1
- )
- else:
- foreman_host = get_docker_hostname()
+ descriptor.register(self.args)
- self._set_cloud_config('FOREMAN_HOST', foreman_host)
+ self._set_cloud_config('FOREMAN_HOST', self.DOCKER_SIMULATOR_NAME)
self._set_cloud_config('FOREMAN_PORT', str(foreman_port))
- def _get_simulator_address(self):
- return get_docker_container_ip(self.args, self.container_name)
-
def _setup_static(self):
- raise NotImplementedError
+ raise NotImplementedError()
class ForemanEnvironment(CloudEnvironment):
diff --git a/test/lib/ansible_test/_internal/cloud/galaxy.py b/test/lib/ansible_test/_internal/cloud/galaxy.py
index c045a362c4..93ed41eb60 100644
--- a/test/lib/ansible_test/_internal/cloud/galaxy.py
+++ b/test/lib/ansible_test/_internal/cloud/galaxy.py
@@ -11,23 +11,12 @@ from . import (
CloudEnvironmentConfig,
)
-from ..util import (
- find_executable,
- display,
+from ..docker_util import (
+ docker_cp_to,
)
-from ..docker_util import (
- docker_command,
- docker_run,
- docker_start,
- docker_rm,
- docker_inspect,
- docker_pull,
- get_docker_container_id,
- get_docker_hostname,
- get_docker_container_ip,
- get_docker_preferred_network_name,
- is_docker_user_defined_network,
+from ..containers import (
+ run_support_container,
)
@@ -103,68 +92,35 @@ class GalaxyProvider(CloudProvider):
'docker.io/pulp/pulp-galaxy-ng@sha256:b79a7be64eff86d8f58db9ca83ed4967bd8b4e45c99addb17a91d11926480cf1'
)
- self.containers = []
-
- def filter(self, targets, exclude):
- """Filter out the tests with the necessary config and res unavailable.
-
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- docker_cmd = 'docker'
- docker = find_executable(docker_cmd, required=False)
-
- if docker:
- return
-
- skip = 'cloud/%s/' % self.platform
- skipped = [target.name for target in targets if skip in target.aliases]
-
- if skipped:
- exclude.append(skip)
- display.warning('Excluding tests marked "%s" which require the "%s" command: %s'
- % (skip.rstrip('/'), docker_cmd, ', '.join(skipped)))
+ self.uses_docker = True
def setup(self):
"""Setup cloud resource before delegation and reg cleanup callback."""
super(GalaxyProvider, self).setup()
- container_id = get_docker_container_id()
-
- p_results = docker_inspect(self.args, 'ansible-ci-pulp')
-
- if p_results and not p_results[0].get('State', {}).get('Running'):
- docker_rm(self.args, 'ansible-ci-pulp')
- p_results = []
-
- display.info('%s ansible-ci-pulp docker container.'
- % ('Using the existing' if p_results else 'Starting a new'),
- verbosity=1)
-
galaxy_port = 80
+ pulp_host = 'ansible-ci-pulp'
pulp_port = 24817
- if not p_results:
- if self.args.docker or container_id:
- publish_ports = []
- else:
- # publish the simulator ports when not running inside docker
- publish_ports = [
- '-p', ':'.join((str(galaxy_port),) * 2),
- '-p', ':'.join((str(pulp_port),) * 2),
- ]
-
- docker_pull(self.args, self.pulp)
-
- # Create the container, don't run it, we need to inject configs before it starts
- stdout, _dummy = docker_run(
- self.args,
- self.pulp,
- ['--name', 'ansible-ci-pulp'] + publish_ports,
- create_only=True
- )
+ ports = [
+ galaxy_port,
+ pulp_port,
+ ]
+
+ # Create the container, don't run it, we need to inject configs before it starts
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.pulp,
+ pulp_host,
+ ports,
+ start=False,
+ allow_existing=True,
+ cleanup=None,
+ )
- pulp_id = stdout.strip()
+ if not descriptor.running:
+ pulp_id = descriptor.container_id
injected_files = {
'/etc/pulp/settings.py': SETTINGS,
@@ -175,20 +131,11 @@ class GalaxyProvider(CloudProvider):
with tempfile.NamedTemporaryFile() as temp_fd:
temp_fd.write(content)
temp_fd.flush()
- docker_command(self.args, ['cp', temp_fd.name, '%s:%s' % (pulp_id, path)])
+ docker_cp_to(self.args, pulp_id, temp_fd.name, path)
- # Start the container
- docker_start(self.args, 'ansible-ci-pulp', [])
+ descriptor.start(self.args)
- self.containers.append('ansible-ci-pulp')
-
- if self.args.docker:
- pulp_host = 'ansible-ci-pulp'
- elif container_id:
- pulp_host = self._get_simulator_address('ansible-ci-pulp')
- display.info('Found Galaxy simulator container address: %s' % pulp_host, verbosity=1)
- else:
- pulp_host = get_docker_hostname()
+ descriptor.register(self.args)
self._set_cloud_config('PULP_HOST', pulp_host)
self._set_cloud_config('PULP_PORT', str(pulp_port))
@@ -196,28 +143,6 @@ class GalaxyProvider(CloudProvider):
self._set_cloud_config('PULP_USER', 'admin')
self._set_cloud_config('PULP_PASSWORD', 'password')
- def get_docker_run_options(self):
- """Get additional options needed when delegating tests to a container.
-
- :rtype: list[str]
- """
- network = get_docker_preferred_network_name(self.args)
-
- if not is_docker_user_defined_network(network):
- return ['--link', 'ansible-ci-pulp']
-
- return []
-
- def cleanup(self):
- """Clean up the resource and temporary configs files after tests."""
- for container_name in self.containers:
- docker_rm(self.args, container_name)
-
- super(GalaxyProvider, self).cleanup()
-
- def _get_simulator_address(self, container_name):
- return get_docker_container_ip(self.args, container_name)
-
class GalaxyEnvironment(CloudEnvironment):
"""Galaxy environment plugin.
diff --git a/test/lib/ansible_test/_internal/cloud/gcp.py b/test/lib/ansible_test/_internal/cloud/gcp.py
index c8de18357c..0a73f724ad 100644
--- a/test/lib/ansible_test/_internal/cloud/gcp.py
+++ b/test/lib/ansible_test/_internal/cloud/gcp.py
@@ -4,8 +4,6 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-import os
-
from ..util import (
display,
ConfigParser,
@@ -20,17 +18,13 @@ from . import (
class GcpCloudProvider(CloudProvider):
"""GCP cloud provider plugin. Sets up cloud resources before delegation."""
-
- def filter(self, targets, exclude):
- """Filter out the cloud tests when the necessary config and resources are not available.
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
+ def __init__(self, args):
+ """Set up container references for provider.
+ :type args: TestConfig
"""
+ super(GcpCloudProvider, self).__init__(args)
- if os.path.isfile(self.config_static_path):
- return
-
- super(GcpCloudProvider, self).filter(targets, exclude)
+ self.uses_config = True
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
diff --git a/test/lib/ansible_test/_internal/cloud/hcloud.py b/test/lib/ansible_test/_internal/cloud/hcloud.py
index 3c422fb49f..dd89366a2c 100644
--- a/test/lib/ansible_test/_internal/cloud/hcloud.py
+++ b/test/lib/ansible_test/_internal/cloud/hcloud.py
@@ -2,8 +2,6 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-import os
-
from ..util import (
display,
ConfigParser,
@@ -31,14 +29,13 @@ class HcloudCloudProvider(CloudProvider):
"""
super(HcloudCloudProvider, self).__init__(args)
+ self.uses_config = True
+
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
- if os.path.isfile(self.config_static_path):
- return
-
aci = self._create_ansible_core_ci()
if aci.available:
diff --git a/test/lib/ansible_test/_internal/cloud/httptester.py b/test/lib/ansible_test/_internal/cloud/httptester.py
new file mode 100644
index 0000000000..c6ca0284fd
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/httptester.py
@@ -0,0 +1,92 @@
+"""HTTP Tester plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ display,
+ generate_password,
+)
+
+from ..config import (
+ IntegrationConfig,
+)
+
+from ..containers import (
+ run_support_container,
+)
+
+KRB5_PASSWORD_ENV = 'KRB5_PASSWORD'
+
+
+class HttptesterProvider(CloudProvider):
+ """HTTP Tester provider plugin. Sets up resources before delegation."""
+ def __init__(self, args): # type: (IntegrationConfig) -> None
+ super(HttptesterProvider, self).__init__(args)
+
+ self.image = os.environ.get('ANSIBLE_HTTP_TEST_CONTAINER', 'quay.io/ansible/http-test-container:1.3.0')
+
+ self.uses_docker = True
+
+ def setup(self): # type: () -> None
+ """Setup resources before delegation."""
+ super(HttptesterProvider, self).setup()
+
+ ports = [
+ 80,
+ 88,
+ 443,
+ 444,
+ 749,
+ ]
+
+ aliases = [
+ 'ansible.http.tests',
+ 'sni1.ansible.http.tests',
+ 'fail.ansible.http.tests',
+ 'self-signed.ansible.http.tests',
+ ]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ 'http-test-container',
+ ports,
+ aliases=aliases,
+ start=True,
+ allow_existing=True,
+ cleanup=True,
+ env={
+ KRB5_PASSWORD_ENV: generate_password(),
+ },
+ )
+
+ descriptor.register(self.args)
+
+ # Read the password from the container environment.
+ # This allows the tests to work when re-using an existing container.
+ # The password is marked as sensitive, since it may differ from the one we generated.
+ krb5_password = descriptor.details.container.env_dict()[KRB5_PASSWORD_ENV]
+ display.sensitive.add(krb5_password)
+
+ self._set_cloud_config(KRB5_PASSWORD_ENV, krb5_password)
+
+
+class HttptesterEnvironment(CloudEnvironment):
+ """HTTP Tester environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self): # type: () -> CloudEnvironmentConfig
+ """Returns the cloud environment config."""
+ return CloudEnvironmentConfig(
+ env_vars=dict(
+ HTTPTESTER='1', # backwards compatibility for tests intended to work with or without HTTP Tester
+ KRB5_PASSWORD=self._get_cloud_config(KRB5_PASSWORD_ENV),
+ )
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/nios.py b/test/lib/ansible_test/_internal/cloud/nios.py
index b9a1a4e4be..813c30b5c9 100644
--- a/test/lib/ansible_test/_internal/cloud/nios.py
+++ b/test/lib/ansible_test/_internal/cloud/nios.py
@@ -10,21 +10,8 @@ from . import (
CloudEnvironmentConfig,
)
-from ..util import (
- find_executable,
- display,
-)
-
-from ..docker_util import (
- docker_run,
- docker_rm,
- docker_inspect,
- docker_pull,
- get_docker_container_id,
- get_docker_hostname,
- get_docker_container_ip,
- get_docker_preferred_network_name,
- is_docker_user_defined_network,
+from ..containers import (
+ run_support_container,
)
@@ -48,7 +35,6 @@ class NiosProvider(CloudProvider):
def __init__(self, args):
"""Set up container references for provider.
-
:type args: TestConfig
"""
super(NiosProvider, self).__init__(args)
@@ -61,30 +47,8 @@ class NiosProvider(CloudProvider):
"""
self.image = self.__container_from_env or self.DOCKER_IMAGE
- self.container_name = ''
-
- def filter(self, targets, exclude):
- """Filter out the tests with the necessary config and res unavailable.
-
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- docker_cmd = 'docker'
- docker = find_executable(docker_cmd, required=False)
-
- if docker:
- return
-
- skip = 'cloud/%s/' % self.platform
- skipped = [target.name for target in targets if skip in target.aliases]
- if skipped:
- exclude.append(skip)
- display.warning(
- 'Excluding tests marked "%s" '
- 'which require the "%s" command: %s'
- % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
- )
+ self.uses_docker = True
def setup(self):
"""Setup cloud resource before delegation and reg cleanup callback."""
@@ -95,80 +59,30 @@ class NiosProvider(CloudProvider):
else:
self._setup_dynamic()
- def get_docker_run_options(self):
- """Get additional options needed when delegating tests to a container.
-
- :rtype: list[str]
- """
- network = get_docker_preferred_network_name(self.args)
-
- if self.managed and not is_docker_user_defined_network(network):
- return ['--link', self.DOCKER_SIMULATOR_NAME]
-
- return []
-
- def cleanup(self):
- """Clean up the resource and temporary configs files after tests."""
- if self.container_name:
- docker_rm(self.args, self.container_name)
-
- super(NiosProvider, self).cleanup()
-
def _setup_dynamic(self):
"""Spawn a NIOS simulator within docker container."""
nios_port = 443
- container_id = get_docker_container_id()
- self.container_name = self.DOCKER_SIMULATOR_NAME
-
- results = docker_inspect(self.args, self.container_name)
-
- if results and not results[0].get('State', {}).get('Running'):
- docker_rm(self.args, self.container_name)
- results = []
-
- display.info(
- '%s NIOS simulator docker container.'
- % ('Using the existing' if results else 'Starting a new'),
- verbosity=1,
+ ports = [
+ nios_port,
+ ]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=True,
)
- if not results:
- if self.args.docker or container_id:
- publish_ports = []
- else:
- # publish the simulator ports when not running inside docker
- publish_ports = [
- '-p', ':'.join((str(nios_port), ) * 2),
- ]
-
- if not self.__container_from_env:
- docker_pull(self.args, self.image)
-
- docker_run(
- self.args,
- self.image,
- ['-d', '--name', self.container_name] + publish_ports,
- )
-
- if self.args.docker:
- nios_host = self.DOCKER_SIMULATOR_NAME
- elif container_id:
- nios_host = self._get_simulator_address()
- display.info(
- 'Found NIOS simulator container address: %s'
- % nios_host, verbosity=1
- )
- else:
- nios_host = get_docker_hostname()
-
- self._set_cloud_config('NIOS_HOST', nios_host)
+ descriptor.register(self.args)
- def _get_simulator_address(self):
- return get_docker_container_ip(self.args, self.container_name)
+ self._set_cloud_config('NIOS_HOST', self.DOCKER_SIMULATOR_NAME)
def _setup_static(self):
- raise NotImplementedError
+ raise NotImplementedError()
class NiosEnvironment(CloudEnvironment):
diff --git a/test/lib/ansible_test/_internal/cloud/opennebula.py b/test/lib/ansible_test/_internal/cloud/opennebula.py
index 559093e3d8..42dbfac224 100644
--- a/test/lib/ansible_test/_internal/cloud/opennebula.py
+++ b/test/lib/ansible_test/_internal/cloud/opennebula.py
@@ -16,10 +16,6 @@ from ..util import (
class OpenNebulaCloudProvider(CloudProvider):
"""Checks if a configuration file has been passed or fixtures are going to be used for testing"""
-
- def filter(self, targets, exclude):
- """ no need to filter modules, they can either run from config file or from fixtures"""
-
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(OpenNebulaCloudProvider, self).setup()
@@ -27,6 +23,8 @@ class OpenNebulaCloudProvider(CloudProvider):
if not self._use_static_config():
self._setup_dynamic()
+ self.uses_config = True
+
def _setup_dynamic(self):
display.info('No config file provided, will run test from fixtures')
diff --git a/test/lib/ansible_test/_internal/cloud/openshift.py b/test/lib/ansible_test/_internal/cloud/openshift.py
index 450816bf3e..0d73a4c5f3 100644
--- a/test/lib/ansible_test/_internal/cloud/openshift.py
+++ b/test/lib/ansible_test/_internal/cloud/openshift.py
@@ -2,10 +2,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-import json
-import os
import re
-import time
from . import (
CloudProvider,
@@ -18,27 +15,12 @@ from ..io import (
)
from ..util import (
- find_executable,
- ApplicationError,
display,
- SubprocessError,
)
-from ..http import (
- HttpClient,
-)
-
-from ..docker_util import (
- docker_exec,
- docker_run,
- docker_rm,
- docker_inspect,
- docker_pull,
- docker_network_inspect,
- get_docker_container_id,
- get_docker_preferred_network_name,
- get_docker_hostname,
- is_docker_user_defined_network,
+from ..containers import (
+ run_support_container,
+ wait_for_file,
)
@@ -54,28 +36,9 @@ class OpenShiftCloudProvider(CloudProvider):
# The image must be pinned to a specific version to guarantee CI passes with the version used.
self.image = 'openshift/origin:v3.9.0'
- self.container_name = ''
-
- def filter(self, targets, exclude):
- """Filter out the cloud tests when the necessary config and resources are not available.
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- if os.path.isfile(self.config_static_path):
- return
-
- docker = find_executable('docker', required=False)
- if docker:
- return
-
- skip = 'cloud/%s/' % self.platform
- skipped = [target.name for target in targets if skip in target.aliases]
-
- if skipped:
- exclude.append(skip)
- display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
- % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+ self.uses_docker = True
+ self.uses_config = True
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
@@ -86,133 +49,52 @@ class OpenShiftCloudProvider(CloudProvider):
else:
self._setup_dynamic()
- def get_remote_ssh_options(self):
- """Get any additional options needed when delegating tests to a remote instance via SSH.
- :rtype: list[str]
- """
- if self.managed:
- return ['-R', '8443:%s:8443' % get_docker_hostname()]
-
- return []
-
- def get_docker_run_options(self):
- """Get any additional options needed when delegating tests to a docker container.
- :rtype: list[str]
- """
- network = get_docker_preferred_network_name(self.args)
-
- if self.managed and not is_docker_user_defined_network(network):
- return ['--link', self.DOCKER_CONTAINER_NAME]
-
- return []
-
- def cleanup(self):
- """Clean up the cloud resource and any temporary configuration files after tests complete."""
- if self.container_name:
- docker_rm(self.args, self.container_name)
-
- super(OpenShiftCloudProvider, self).cleanup()
-
def _setup_static(self):
"""Configure OpenShift tests for use with static configuration."""
config = read_text_file(self.config_static_path)
match = re.search(r'^ *server: (?P<server>.*)$', config, flags=re.MULTILINE)
- if match:
- endpoint = match.group('server')
- self._wait_for_service(endpoint)
- else:
- display.warning('Could not find OpenShift endpoint in kubeconfig. Skipping check for OpenShift service availability.')
+ if not match:
+ display.warning('Could not find OpenShift endpoint in kubeconfig.')
def _setup_dynamic(self):
"""Create a OpenShift container using docker."""
- self.container_name = self.DOCKER_CONTAINER_NAME
-
- results = docker_inspect(self.args, self.container_name)
-
- if results and not results[0]['State']['Running']:
- docker_rm(self.args, self.container_name)
- results = []
-
- if results:
- display.info('Using the existing OpenShift docker container.', verbosity=1)
- else:
- display.info('Starting a new OpenShift docker container.', verbosity=1)
- docker_pull(self.args, self.image)
- cmd = ['start', 'master', '--listen', 'https://0.0.0.0:8443']
- docker_run(self.args, self.image, ['-d', '-p', '8443:8443', '--name', self.container_name], cmd)
-
- container_id = get_docker_container_id()
-
- if container_id:
- host = self._get_container_address()
- display.info('Found OpenShift container address: %s' % host, verbosity=1)
- else:
- host = get_docker_hostname()
-
port = 8443
- endpoint = 'https://%s:%s/' % (host, port)
- self._wait_for_service(endpoint)
+ ports = [
+ port,
+ ]
+
+ cmd = ['start', 'master', '--listen', 'https://0.0.0.0:%d' % port]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_CONTAINER_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=True,
+ cmd=cmd,
+ )
+
+ descriptor.register(self.args)
if self.args.explain:
config = '# Unknown'
else:
- if self.args.docker:
- host = self.DOCKER_CONTAINER_NAME
- elif self.args.remote:
- host = 'localhost'
-
- server = 'https://%s:%s' % (host, port)
- config = self._get_config(server)
+ config = self._get_config(self.DOCKER_CONTAINER_NAME, 'https://%s:%s/' % (self.DOCKER_CONTAINER_NAME, port))
self._write_config(config)
- def _get_container_address(self):
- current_network = get_docker_preferred_network_name(self.args)
- networks = docker_network_inspect(self.args, current_network)
-
- try:
- network = [network for network in networks if network['Name'] == current_network][0]
- containers = network['Containers']
- container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_CONTAINER_NAME][0]
- return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
- except Exception:
- display.error('Failed to process the following docker network inspect output:\n%s' %
- json.dumps(networks, indent=4, sort_keys=True))
- raise
-
- def _wait_for_service(self, endpoint):
- """Wait for the OpenShift service endpoint to accept connections.
- :type endpoint: str
- """
- if self.args.explain:
- return
-
- client = HttpClient(self.args, always=True, insecure=True)
-
- for dummy in range(1, 30):
- display.info('Waiting for OpenShift service: %s' % endpoint, verbosity=1)
-
- try:
- client.get(endpoint)
- return
- except SubprocessError:
- pass
-
- time.sleep(10)
-
- raise ApplicationError('Timeout waiting for OpenShift service.')
-
- def _get_config(self, server):
+ def _get_config(self, container_name, server):
"""Get OpenShift config from container.
+ :type container_name: str
:type server: str
:rtype: dict[str, str]
"""
- cmd = ['cat', '/var/lib/origin/openshift.local.config/master/admin.kubeconfig']
-
- stdout, dummy = docker_exec(self.args, self.container_name, cmd, capture=True)
+ stdout = wait_for_file(self.args, container_name, '/var/lib/origin/openshift.local.config/master/admin.kubeconfig', sleep=10, tries=30)
config = stdout
config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
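
That re.sub() call rewrites the kubeconfig fetched from the container so clients skip verification of the container's self-signed CA. A worked example of the substitution:

    import re

    before = '    certificate-authority-data: REDACTED'
    after = re.sub(r'^( *)certificate-authority-data: .*$',
                   r'\1insecure-skip-tls-verify: true', before, flags=re.MULTILINE)
    assert after == '    insecure-skip-tls-verify: true'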
diff --git a/test/lib/ansible_test/_internal/cloud/scaleway.py b/test/lib/ansible_test/_internal/cloud/scaleway.py
index 22abe197ba..19a412ca7f 100644
--- a/test/lib/ansible_test/_internal/cloud/scaleway.py
+++ b/test/lib/ansible_test/_internal/cloud/scaleway.py
@@ -25,15 +25,7 @@ class ScalewayCloudProvider(CloudProvider):
"""
super(ScalewayCloudProvider, self).__init__(args)
- def filter(self, targets, exclude):
- """Filter out the cloud tests when the necessary config and resources are not available.
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- if os.path.isfile(self.config_static_path):
- return
-
- super(ScalewayCloudProvider, self).filter(targets, exclude)
+ self.uses_config = True
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
diff --git a/test/lib/ansible_test/_internal/cloud/vcenter.py b/test/lib/ansible_test/_internal/cloud/vcenter.py
index 3b38a19ebc..b13dc851fa 100644
--- a/test/lib/ansible_test/_internal/cloud/vcenter.py
+++ b/test/lib/ansible_test/_internal/cloud/vcenter.py
@@ -11,22 +11,13 @@ from . import (
)
from ..util import (
- find_executable,
display,
ConfigParser,
ApplicationError,
)
-from ..docker_util import (
- docker_run,
- docker_rm,
- docker_inspect,
- docker_pull,
- get_docker_container_id,
- get_docker_hostname,
- get_docker_container_ip,
- get_docker_preferred_network_name,
- is_docker_user_defined_network,
+from ..containers import (
+ run_support_container,
)
@@ -45,44 +36,24 @@ class VcenterProvider(CloudProvider):
self.image = os.environ.get('ANSIBLE_VCSIM_CONTAINER')
else:
self.image = 'quay.io/ansible/vcenter-test-container:1.7.0'
- self.container_name = ''
# VMware tests can be run on govcsim or BYO with a static config file.
# The simulator is the default if no config is provided.
self.vmware_test_platform = os.environ.get('VMWARE_TEST_PLATFORM', 'govcsim')
- self.insecure = False
- self.proxy = None
- self.platform = 'vcenter'
-
- def filter(self, targets, exclude):
- """Filter out the cloud tests when the necessary config and resources are not available.
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- if self.vmware_test_platform == 'govcsim' or (self.vmware_test_platform == '' and not os.path.isfile(self.config_static_path)):
- docker = find_executable('docker', required=False)
-
- if docker:
- return
-
- skip = 'cloud/%s/' % self.platform
- skipped = [target.name for target in targets if skip in target.aliases]
- if skipped:
- exclude.append(skip)
- display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
- % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+ if self.vmware_test_platform == 'govcsim':
+ self.uses_docker = True
+ self.uses_config = False
elif self.vmware_test_platform == 'static':
- if os.path.isfile(self.config_static_path):
- return
-
- super(VcenterProvider, self).filter(targets, exclude)
+ self.uses_docker = False
+ self.uses_config = True
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(VcenterProvider, self).setup()
self._set_cloud_config('vmware_test_platform', self.vmware_test_platform)
+
if self.vmware_test_platform == 'govcsim':
self._setup_dynamic_simulator()
self.managed = True
@@ -92,91 +63,33 @@ class VcenterProvider(CloudProvider):
else:
raise ApplicationError('Unknown vmware_test_platform: %s' % self.vmware_test_platform)
- def get_docker_run_options(self):
- """Get any additional options needed when delegating tests to a docker container.
- :rtype: list[str]
- """
- network = get_docker_preferred_network_name(self.args)
-
- if self.managed and not is_docker_user_defined_network(network):
- return ['--link', self.DOCKER_SIMULATOR_NAME]
-
- return []
-
- def cleanup(self):
- """Clean up the cloud resource and any temporary configuration files after tests complete."""
- if self.container_name:
- docker_rm(self.args, self.container_name)
-
- super(VcenterProvider, self).cleanup()
-
def _setup_dynamic_simulator(self):
"""Create a vcenter simulator using docker."""
- container_id = get_docker_container_id()
-
- self.container_name = self.DOCKER_SIMULATOR_NAME
-
- results = docker_inspect(self.args, self.container_name)
-
- if results and not results[0].get('State', {}).get('Running'):
- docker_rm(self.args, self.container_name)
- results = []
-
- if results:
- display.info('Using the existing vCenter simulator docker container.', verbosity=1)
- else:
- display.info('Starting a new vCenter simulator docker container.', verbosity=1)
-
- if not self.args.docker and not container_id:
- # publish the simulator ports when not running inside docker
- publish_ports = [
- '-p', '1443:443',
- '-p', '8080:8080',
- '-p', '8989:8989',
- '-p', '5000:5000', # control port for flask app in simulator
- ]
- else:
- publish_ports = []
-
- if not os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
- docker_pull(self.args, self.image)
-
- docker_run(
- self.args,
- self.image,
- ['-d', '--name', self.container_name] + publish_ports,
- )
-
- if self.args.docker:
- vcenter_hostname = self.DOCKER_SIMULATOR_NAME
- elif container_id:
- vcenter_hostname = self._get_simulator_address()
- display.info('Found vCenter simulator container address: %s' % vcenter_hostname, verbosity=1)
- else:
- vcenter_hostname = get_docker_hostname()
+ ports = [
+ 443,
+ 8080,
+ 8989,
+ 5000, # control port for flask app in simulator
+ ]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=True,
+ )
- self._set_cloud_config('vcenter_hostname', vcenter_hostname)
+ descriptor.register(self.args)
- def _get_simulator_address(self):
- return get_docker_container_ip(self.args, self.container_name)
+ self._set_cloud_config('vcenter_hostname', self.DOCKER_SIMULATOR_NAME)
def _setup_static(self):
if not os.path.exists(self.config_static_path):
raise ApplicationError('Configuration file does not exist: %s' % self.config_static_path)
- parser = ConfigParser({
- 'vcenter_port': '443',
- 'vmware_proxy_host': '',
- 'vmware_proxy_port': '8080'})
- parser.read(self.config_static_path)
-
- if parser.get('DEFAULT', 'vmware_validate_certs').lower() in ('no', 'false'):
- self.insecure = True
- proxy_host = parser.get('DEFAULT', 'vmware_proxy_host')
- proxy_port = int(parser.get('DEFAULT', 'vmware_proxy_port'))
- if proxy_host and proxy_port:
- self.proxy = 'http://%s:%d' % (proxy_host, proxy_port)
-
class VcenterEnvironment(CloudEnvironment):
"""VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
@@ -208,10 +121,6 @@ class VcenterEnvironment(CloudEnvironment):
vcenter_username='user',
vcenter_password='pass',
)
- # Shippable starts ansible-test from withing an existing container,
- # and in this case, we don't have to change the vcenter port.
- if not self.args.docker and not get_docker_container_id():
- ansible_vars['vcenter_port'] = '1443'
for key, value in ansible_vars.items():
if key.endswith('_password'):
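[editor's note] The net effect of the provider changes above: the per-provider filter(), get_docker_run_options() and cleanup() boilerplate is replaced by two declarative flags (uses_docker, uses_config), presumably consumed by the base CloudProvider.filter(), plus a single run_support_container() call. A minimal sketch of a provider under the new scheme, modeled on the vcenter changes (the class name and image are hypothetical):

    class ExampleCloudProvider(CloudProvider):
        """Hypothetical provider illustrating the new support-container flow."""
        def __init__(self, args):
            super(ExampleCloudProvider, self).__init__(args)
            self.image = 'quay.io/example/simulator:1.0'  # hypothetical image
            self.uses_docker = True   # tests are skipped when no container runtime is found
            self.uses_config = False  # no static config file is required

        def setup(self):
            super(ExampleCloudProvider, self).setup()
            descriptor = run_support_container(
                self.args, self.platform, self.image, 'example-simulator',
                [8080], allow_existing=True, cleanup=True,
            )
            descriptor.register(self.args)  # record IP/ports once the container is up
            self._set_cloud_config('example_hostname', 'example-simulator')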
diff --git a/test/lib/ansible_test/_internal/cloud/vultr.py b/test/lib/ansible_test/_internal/cloud/vultr.py
index ce6184f7ce..132f16ebd7 100644
--- a/test/lib/ansible_test/_internal/cloud/vultr.py
+++ b/test/lib/ansible_test/_internal/cloud/vultr.py
@@ -18,22 +18,13 @@ from ..util import (
class VultrCloudProvider(CloudProvider):
"""Checks if a configuration file has been passed or fixtures are going to be used for testing"""
-
def __init__(self, args):
"""
:type args: TestConfig
"""
super(VultrCloudProvider, self).__init__(args)
- def filter(self, targets, exclude):
- """Filter out the cloud tests when the necessary config and resources are not available.
- :type targets: tuple[TestTarget]
- :type exclude: list[str]
- """
- if os.path.isfile(self.config_static_path):
- return
-
- super(VultrCloudProvider, self).filter(targets, exclude)
+ self.uses_config = True
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
diff --git a/test/lib/ansible_test/_internal/config.py b/test/lib/ansible_test/_internal/config.py
index eb9c1739a9..8ebfca0615 100644
--- a/test/lib/ansible_test/_internal/config.py
+++ b/test/lib/ansible_test/_internal/config.py
@@ -9,7 +9,6 @@ from . import types as t
from .util import (
find_python,
- generate_password,
generate_pip_command,
ApplicationError,
)
@@ -126,13 +125,7 @@ class EnvironmentConfig(CommonConfig):
if self.delegate:
self.requirements = True
- self.inject_httptester = args.inject_httptester if 'inject_httptester' in args else False # type: bool
- self.httptester = docker_qualify_image(args.httptester if 'httptester' in args else '') # type: str
- krb5_password = args.httptester_krb5_password if 'httptester_krb5_password' in args else ''
- self.httptester_krb5_password = krb5_password or generate_password() # type: str
-
- if self.get_delegated_completion().get('httptester', 'enabled') == 'disabled':
- self.httptester = False
+ self.containers = args.containers # type: t.Optional[t.Dict[str, t.Dict[str, t.Dict[str, t.Any]]]]
if self.get_delegated_completion().get('pip-check', 'enabled') == 'disabled':
self.pip_check = False
@@ -233,9 +226,6 @@ class ShellConfig(EnvironmentConfig):
self.raw = args.raw # type: bool
- if self.raw:
- self.httptester = False
-
class SanityConfig(TestConfig):
"""Configuration for the sanity command."""
diff --git a/test/lib/ansible_test/_internal/containers.py b/test/lib/ansible_test/_internal/containers.py
new file mode 100644
index 0000000000..fbba0a2382
--- /dev/null
+++ b/test/lib/ansible_test/_internal/containers.py
@@ -0,0 +1,755 @@
+"""High level functions for working with containers."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import contextlib
+import json
+import random
+import time
+import uuid
+
+from . import types as t
+
+from .encoding import (
+ Text,
+)
+
+from .util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+ get_host_ip,
+ sanitize_host_name,
+)
+
+from .util_common import (
+ named_temporary_file,
+)
+
+from .config import (
+ EnvironmentConfig,
+ IntegrationConfig,
+ WindowsIntegrationConfig,
+)
+
+from .docker_util import (
+ ContainerNotFoundError,
+ DockerInspect,
+ docker_exec,
+ docker_inspect,
+ docker_pull,
+ docker_rm,
+ docker_run,
+ docker_start,
+ get_docker_command,
+ get_docker_container_id,
+ get_docker_host_ip,
+)
+
+from .ansible_util import (
+ run_playbook,
+)
+
+from .core_ci import (
+ SshKey,
+)
+
+from .target import (
+ IntegrationTarget,
+)
+
+from .ssh import (
+ SshConnectionDetail,
+ SshProcess,
+ create_ssh_port_forwards,
+ create_ssh_port_redirects,
+ generate_ssh_inventory,
+)
+
+# information about support containers provisioned by the current ansible-test instance
+support_containers = {} # type: t.Dict[str, ContainerDescriptor]
+
+
+class HostType:
+ """Enum representing the types of hosts involved in running tests."""
+ origin = 'origin'
+ control = 'control'
+ managed = 'managed'
+
+
+def run_support_container(
+ args, # type: EnvironmentConfig
+ context, # type: str
+ image, # type: str
+ name, # type: str
+ ports, # type: t.List[int]
+ aliases=None, # type: t.Optional[t.List[str]]
+ start=True, # type: bool
+ allow_existing=False, # type: bool
+ cleanup=None, # type: t.Optional[bool]
+ cmd=None, # type: t.Optional[t.List[str]]
+ env=None, # type: t.Optional[t.Dict[str, str]]
+): # type: (...) -> ContainerDescriptor
+ """
+ Start a container used to support tests, but not run them.
+ Containers created this way will be accessible from tests.
+ """
+ if name in support_containers:
+ raise Exception('Container already defined: %s' % name)
+
+ # SSH is required for publishing ports, as well as modifying the hosts file.
+ # Initializing the SSH key here makes sure it is available for use after delegation.
+ SshKey(args)
+
+ aliases = aliases or [sanitize_host_name(name)]
+
+ current_container_id = get_docker_container_id()
+
+ publish_ports = True
+ docker_command = get_docker_command().command
+
+ if docker_command == 'docker':
+ if args.docker:
+ publish_ports = False # publishing ports is not needed when test hosts are on the docker network
+
+ if current_container_id:
+ publish_ports = False # publishing ports is pointless if already running in a docker container
+
+ options = ['--name', name]
+
+ if start:
+ options.append('-d')
+
+ if publish_ports:
+ for port in ports:
+ options.extend(['-p', str(port)])
+
+ if env:
+ for key, value in env.items():
+ options.extend(['--env', '%s=%s' % (key, value)])
+
+ support_container_id = None
+
+ if allow_existing:
+ try:
+ container = docker_inspect(args, name)
+ except ContainerNotFoundError:
+ container = None
+
+ if container:
+ support_container_id = container.id
+
+ if not container.running:
+ display.info('Ignoring existing "%s" container which is not running.' % name, verbosity=1)
+ support_container_id = None
+ elif container.image != image:
+ display.info('Ignoring existing "%s" container which has the wrong image.' % name, verbosity=1)
+ support_container_id = None
+ elif publish_ports and not all(endpoints and len(endpoints) == 1 for endpoints in [container.get_tcp_port(port) for port in ports]):
+ display.info('Ignoring existing "%s" container which does not have the required published ports.' % name, verbosity=1)
+ support_container_id = None
+
+ if not support_container_id:
+ docker_rm(args, name)
+
+ if support_container_id:
+ display.info('Using existing "%s" container.' % name)
+ running = True
+ existing = True
+ else:
+ display.info('Starting new "%s" container.' % name)
+ docker_pull(args, image)
+ support_container_id = docker_run(args, image, options, create_only=not start, cmd=cmd)
+ running = start
+ existing = False
+
+ if cleanup is None:
+ cleanup = not existing
+
+ descriptor = ContainerDescriptor(
+ image,
+ context,
+ name,
+ support_container_id,
+ ports,
+ aliases,
+ publish_ports,
+ running,
+ existing,
+ cleanup,
+ env,
+ )
+
+ if not support_containers:
+ atexit.register(cleanup_containers, args)
+
+ support_containers[name] = descriptor
+
+ return descriptor
+
+
+def get_container_database(args): # type: (EnvironmentConfig) -> ContainerDatabase
+ """Return the current container database, creating it as needed, or returning the one provided on the command line through delegation."""
+ if not args.containers:
+ args.containers = create_container_database(args)
+ elif isinstance(args.containers, (str, bytes, Text)):
+ args.containers = ContainerDatabase.from_dict(json.loads(args.containers))
+
+ display.info('>>> Container Database\n%s' % json.dumps(args.containers.to_dict(), indent=4, sort_keys=True), verbosity=3)
+
+ return args.containers
+
+
+class ContainerAccess:
+ """Information needed for one test host to access a single container supporting tests."""
+ def __init__(self, host_ip, names, ports, forwards): # type: (str, t.List[str], t.Optional[t.List[int]], t.Optional[t.Dict[int, int]]) -> None
+ # if forwards is set
+ # this is where forwards are sent (it is the host that provides an indirect connection to the containers on alternate ports)
+ # /etc/hosts uses 127.0.0.1 (since port redirection will be used)
+ # else
+ # this is what goes into /etc/hosts (it is the container's direct IP)
+ self.host_ip = host_ip
+
+ # primary name + any aliases -- these go into the hosts file and reference the appropriate ip for the origin/control/managed host
+ self.names = names
+
+ # ports available (set if forwards is not set)
+ self.ports = ports
+
+ # port redirections to create through host_ip -- if not set, no port redirections will be used
+ self.forwards = forwards
+
+ def port_map(self): # type: () -> t.List[t.Tuple[int, int]]
+ """Return a port map for accessing this container."""
+ if self.forwards:
+ ports = list(self.forwards.items())
+ else:
+ ports = [(port, port) for port in self.ports]
+
+ return ports
+
+ @staticmethod
+ def from_dict(data): # type: (t.Dict[str, t.Any]) -> ContainerAccess
+ """Return a ContainerAccess instance from the given dict."""
+ forwards = data.get('forwards')
+
+ if forwards:
+ forwards = dict((int(key), value) for key, value in forwards.items())
+
+ return ContainerAccess(
+ host_ip=data['host_ip'],
+ names=data['names'],
+ ports=data.get('ports'),
+ forwards=forwards,
+ )
+
+ def to_dict(self): # type: () -> t.Dict[str, t.Any]
+ """Return a dict of the current instance."""
+ value = dict(
+ host_ip=self.host_ip,
+ names=self.names,
+ )
+
+ if self.ports:
+ value.update(ports=self.ports)
+
+ if self.forwards:
+ value.update(forwards=self.forwards)
+
+ return value
+
+
+class ContainerDatabase:
+ """Database of running containers used to support tests."""
+ def __init__(self, data): # type: (t.Dict[str, t.Dict[str, t.Dict[str, ContainerAccess]]]) -> None
+ self.data = data
+
+ @staticmethod
+ def from_dict(data): # type: (t.Dict[str, t.Any]) -> ContainerDatabase
+ """Return a ContainerDatabase instance from the given dict."""
+ return ContainerDatabase(dict((access_name,
+ dict((context_name,
+ dict((container_name, ContainerAccess.from_dict(container))
+ for container_name, container in containers.items()))
+ for context_name, containers in contexts.items()))
+ for access_name, contexts in data.items()))
+
+ def to_dict(self): # type: () -> t.Dict[str, t.Any]
+ """Return a dict of the current instance."""
+ return dict((access_name,
+ dict((context_name,
+ dict((container_name, container.to_dict())
+ for container_name, container in containers.items()))
+ for context_name, containers in contexts.items()))
+ for access_name, contexts in self.data.items())
+
+
+def local_ssh(args): # type: (EnvironmentConfig) -> SshConnectionDetail
+ """Return SSH connection details for localhost, connecting as root to the default SSH port."""
+ return SshConnectionDetail('localhost', 'localhost', None, 'root', SshKey(args).key, args.python_executable)
+
+
+def create_container_database(args): # type: (EnvironmentConfig) -> ContainerDatabase
+ """Create and return a container database with information necessary for all test hosts to make use of relevant support containers."""
+ origin = {} # type: t.Dict[str, t.Dict[str, ContainerAccess]]
+ control = {} # type: t.Dict[str, t.Dict[str, ContainerAccess]]
+ managed = {} # type: t.Dict[str, t.Dict[str, ContainerAccess]]
+
+ for name, container in support_containers.items():
+ if container.details.published_ports:
+ published_access = ContainerAccess(
+ host_ip=get_docker_host_ip(),
+ names=container.aliases,
+ ports=None,
+ forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()),
+ )
+ else:
+ published_access = None # no published access without published ports (ports are only published if needed)
+
+ if container.details.container_ip:
+ # docker containers and rootful podman containers should have a container IP address
+ container_access = ContainerAccess(
+ host_ip=container.details.container_ip,
+ names=container.aliases,
+ ports=container.ports,
+ forwards=None,
+ )
+ elif get_docker_command().command == 'podman':
+ # published ports for rootless podman containers should be accessible from the host's IP
+ container_access = ContainerAccess(
+ host_ip=get_host_ip(),
+ names=container.aliases,
+ ports=None,
+ forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()),
+ )
+ else:
+ container_access = None # no container access without an IP address
+
+ if get_docker_container_id():
+ if not container_access:
+ raise Exception('Missing IP address for container: %s' % name)
+
+ origin_context = origin.setdefault(container.context, {})
+ origin_context[name] = container_access
+ elif not published_access:
+ pass # origin does not have network access to the containers
+ else:
+ origin_context = origin.setdefault(container.context, {})
+ origin_context[name] = published_access
+
+ if args.remote:
+ pass # SSH forwarding required
+ elif args.docker or get_docker_container_id():
+ if container_access:
+ control_context = control.setdefault(container.context, {})
+ control_context[name] = container_access
+ else:
+ raise Exception('Missing IP address for container: %s' % name)
+ else:
+ if not published_access:
+ raise Exception('Missing published ports for container: %s' % name)
+
+ control_context = control.setdefault(container.context, {})
+ control_context[name] = published_access
+
+ data = {
+ HostType.origin: origin,
+ HostType.control: control,
+ HostType.managed: managed,
+ }
+
+ data = dict((key, value) for key, value in data.items() if value)
+
+ return ContainerDatabase(data)
+
+
+class SupportContainerContext:
+ """Context object for tracking information relating to access of support containers."""
+ def __init__(self, containers, process): # type: (ContainerDatabase, t.Optional[SshProcess]) -> None
+ self.containers = containers
+ self.process = process
+
+ def close(self): # type: () -> None
+ """Close the process maintaining the port forwards."""
+ if not self.process:
+ return # forwarding not in use
+
+ self.process.terminate()
+
+ display.info('Waiting for the session SSH port forwarding process to terminate.', verbosity=1)
+
+ self.process.wait()
+
+
+@contextlib.contextmanager
+def support_container_context(
+ args, # type: EnvironmentConfig
+ ssh, # type: t.Optional[SshConnectionDetail]
+): # type: (...) -> t.Optional[ContainerDatabase]
+ """Create a context manager for integration tests that use support containers."""
+ if not isinstance(args, IntegrationConfig):
+ yield None # containers are only used for integration tests
+ return
+
+ containers = get_container_database(args)
+
+ if not containers.data:
+ yield ContainerDatabase({}) # no containers are being used, return an empty database
+ return
+
+ context = create_support_container_context(args, ssh, containers)
+
+ try:
+ yield context.containers
+ finally:
+ context.close()
+
+
+def create_support_container_context(
+ args, # type: EnvironmentConfig
+ ssh, # type: t.Optional[SshConnectionDetail]
+ containers, # type: ContainerDatabase
+): # type: (...) -> SupportContainerContext
+ """Context manager that provides SSH port forwards. Returns updated container metadata."""
+ host_type = HostType.control
+
+ revised = ContainerDatabase(containers.data.copy())
+ source = revised.data.pop(HostType.origin, None)
+
+ container_map = {} # type: t.Dict[t.Tuple[str, int], t.Tuple[str, str, int]]
+
+ if host_type not in revised.data:
+ if not source:
+ raise Exception('Missing origin container details.')
+
+ for context_name, context in source.items():
+ for container_name, container in context.items():
+ for port, access_port in container.port_map():
+ container_map[(container.host_ip, access_port)] = (context_name, container_name, port)
+
+ if not container_map:
+ return SupportContainerContext(revised, None)
+
+ if not ssh:
+ raise Exception('The %s host was not pre-configured for container access and SSH forwarding is not available.' % host_type)
+
+ forwards = list(container_map.keys())
+ process = create_ssh_port_forwards(args, ssh, forwards)
+ result = SupportContainerContext(revised, process)
+
+ try:
+ port_forwards = process.collect_port_forwards()
+ contexts = {}
+
+ for forward, forwarded_port in port_forwards.items():
+ access_host, access_port = forward
+ context_name, container_name, container_port = container_map[(access_host, access_port)]
+ container = source[context_name][container_name]
+ context = contexts.setdefault(context_name, {})
+
+ forwarded_container = context.setdefault(container_name, ContainerAccess('127.0.0.1', container.names, None, {}))
+ forwarded_container.forwards[container_port] = forwarded_port
+
+ display.info('Container "%s" port %d available at %s:%d is forwarded over SSH as port %d.' % (
+ container_name, container_port, access_host, access_port, forwarded_port,
+ ), verbosity=1)
+
+ revised.data[host_type] = contexts
+
+ return result
+ except Exception:
+ result.close()
+ raise
+
+
+class ContainerDescriptor:
+ """Information about a support container."""
+ def __init__(self,
+ image, # type: str
+ context, # type: str
+ name, # type: str
+ container_id, # type: str
+ ports, # type: t.List[int]
+ aliases, # type: t.List[str]
+ publish_ports, # type: bool
+ running, # type: bool
+ existing, # type: bool
+ cleanup, # type: bool
+ env, # type: t.Optional[t.Dict[str, str]]
+ ): # type: (...) -> None
+ self.image = image
+ self.context = context
+ self.name = name
+ self.container_id = container_id
+ self.ports = ports
+ self.aliases = aliases
+ self.publish_ports = publish_ports
+ self.running = running
+ self.existing = existing
+ self.cleanup = cleanup
+ self.env = env
+ self.details = None # type: t.Optional[SupportContainer]
+
+ def start(self, args): # type: (EnvironmentConfig) -> None
+ """Start the container. Used for containers which are created, but not started."""
+ docker_start(args, self.name)
+
+ def register(self, args): # type: (EnvironmentConfig) -> SupportContainer
+ """Record the container's runtime details. Must be used after the container has been started."""
+ if self.details:
+ raise Exception('Container already registered: %s' % self.name)
+
+ try:
+ container = docker_inspect(args, self.container_id)
+ except ContainerNotFoundError:
+ if not args.explain:
+ raise
+
+ # provide enough mock data to keep --explain working
+ container = DockerInspect(args, dict(
+ Id=self.container_id,
+ NetworkSettings=dict(
+ IPAddress='127.0.0.1',
+ Ports=dict(('%d/tcp' % port, [dict(HostPort=random.randint(30000, 40000) if self.publish_ports else port)]) for port in self.ports),
+ ),
+ Config=dict(
+ Env=['%s=%s' % (key, value) for key, value in self.env.items()] if self.env else [],
+ ),
+ ))
+
+ support_container_ip = container.get_ip_address()
+
+ if self.publish_ports:
+ # inspect the support container to locate the published ports
+ tcp_ports = dict((port, container.get_tcp_port(port)) for port in self.ports)
+
+ if any(not config or len(config) != 1 for config in tcp_ports.values()):
+ raise ApplicationError('Unexpected `docker inspect` results for published TCP ports:\n%s' % json.dumps(tcp_ports, indent=4, sort_keys=True))
+
+ published_ports = dict((port, int(config[0]['HostPort'])) for port, config in tcp_ports.items())
+ else:
+ published_ports = {}
+
+ self.details = SupportContainer(
+ container,
+ support_container_ip,
+ published_ports,
+ )
+
+ return self.details
+
+
+class SupportContainer:
+ """Information about a running support container available for use by tests."""
+ def __init__(self,
+ container, # type: DockerInspect
+ container_ip, # type: str
+ published_ports, # type: t.Dict[int, int]
+ ): # type: (...) -> None
+ self.container = container
+ self.container_ip = container_ip
+ self.published_ports = published_ports
+
+
+def wait_for_file(args, # type: EnvironmentConfig
+ container_name, # type: str
+ path, # type: str
+ sleep, # type: int
+ tries, # type: int
+ check=None, # type: t.Optional[t.Callable[[str], bool]]
+ ): # type: (...) -> str
+ """Wait for the specified file to become available in the requested container and return its contents."""
+ display.info('Waiting for container "%s" to provide file: %s' % (container_name, path))
+
+ for _iteration in range(1, tries + 1):
+ if _iteration > 1:
+ time.sleep(sleep)
+
+ try:
+ stdout = docker_exec(args, container_name, ['dd', 'if=%s' % path], capture=True)[0]
+ except SubprocessError:
+ continue
+
+ if not check or check(stdout):
+ return stdout
+
+ raise ApplicationError('Timeout waiting for container "%s" to provide file: %s' % (container_name, path))
+
+
+def cleanup_containers(args): # type: (EnvironmentConfig) -> None
+ """Clean up containers."""
+ for container in support_containers.values():
+ if container.cleanup:
+ docker_rm(args, container.container_id)
+ else:
+ display.notice('Remember to run `docker rm -f %s` when finished testing.' % container.name)
+
+
+def create_hosts_entries(context): # type: (t.Dict[str, ContainerAccess]) -> t.List[str]
+ """Return hosts entries for the specified context."""
+ entries = []
+ unique_id = uuid.uuid4()
+
+ for container in context.values():
+ # forwards require port redirection through localhost
+ if container.forwards:
+ host_ip = '127.0.0.1'
+ else:
+ host_ip = container.host_ip
+
+ entries.append('%s %s # ansible-test %s' % (host_ip, ' '.join(container.names), unique_id))
+
+ return entries
+
+
+def create_container_hooks(
+ args, # type: IntegrationConfig
+ managed_connections, # type: t.Optional[t.List[SshConnectionDetail]]
+): # type: (...) -> t.Tuple[t.Optional[t.Callable[[IntegrationTarget], None]], t.Optional[t.Callable[[IntegrationTarget], None]]]
+ """Return pre and post target callbacks for enabling and disabling container access for each test target."""
+ containers = get_container_database(args)
+
+ control_contexts = containers.data.get(HostType.control)
+
+ if control_contexts:
+ managed_contexts = containers.data.get(HostType.managed)
+
+ if not managed_contexts:
+ managed_contexts = create_managed_contexts(control_contexts)
+
+ control_type = 'posix'
+
+ if isinstance(args, WindowsIntegrationConfig):
+ managed_type = 'windows'
+ else:
+ managed_type = 'posix'
+
+ control_state = {}
+ managed_state = {}
+
+ control_connections = [local_ssh(args)]
+
+ def pre_target(target):
+ forward_ssh_ports(args, control_connections, '%s_hosts_prepare.yml' % control_type, control_state, target, HostType.control, control_contexts)
+ forward_ssh_ports(args, managed_connections, '%s_hosts_prepare.yml' % managed_type, managed_state, target, HostType.managed, managed_contexts)
+
+ def post_target(target):
+ cleanup_ssh_ports(args, control_connections, '%s_hosts_restore.yml' % control_type, control_state, target, HostType.control)
+ cleanup_ssh_ports(args, managed_connections, '%s_hosts_restore.yml' % managed_type, managed_state, target, HostType.managed)
+ else:
+ pre_target, post_target = None, None
+
+ return pre_target, post_target
+
+
+def create_managed_contexts(control_contexts): # type: (t.Dict[str, t.Dict[str, ContainerAccess]]) -> t.Dict[str, t.Dict[str, ContainerAccess]]
+ """Create managed contexts from the given control contexts."""
+ managed_contexts = {}
+
+ for context_name, control_context in control_contexts.items():
+ managed_context = managed_contexts[context_name] = {}
+
+ for container_name, control_container in control_context.items():
+ managed_context[container_name] = ContainerAccess(control_container.host_ip, control_container.names, None, dict(control_container.port_map()))
+
+ return managed_contexts
+
+
+def forward_ssh_ports(
+ args, # type: IntegrationConfig
+ ssh_connections, # type: t.Optional[t.List[SshConnectionDetail]]
+ playbook, # type: str
+ target_state, # type: t.Dict[str, t.Tuple[t.List[str], t.List[SshProcess]]]
+ target, # type: IntegrationTarget
+ host_type, # type: str
+ contexts, # type: t.Dict[str, t.Dict[str, ContainerAccess]]
+): # type: (...) -> None
+ """Configure port forwarding using SSH and write hosts file entries."""
+ if ssh_connections is None:
+ return
+
+ test_context = None
+
+ for context_name, context in contexts.items():
+ context_alias = 'cloud/%s/' % context_name
+
+ if context_alias in target.aliases:
+ test_context = context
+ break
+
+ if not test_context:
+ return
+
+ if not ssh_connections:
+ raise Exception('The %s host was not pre-configured for container access and SSH forwarding is not available.' % host_type)
+
+ redirects = [] # type: t.List[t.Tuple[int, str, int]]
+ messages = []
+
+ for container_name, container in test_context.items():
+ explain = []
+
+ for container_port, access_port in container.port_map():
+ if container.forwards:
+ redirects.append((container_port, container.host_ip, access_port))
+
+ explain.append('%d -> %s:%d' % (container_port, container.host_ip, access_port))
+ else:
+ explain.append('%s:%d' % (container.host_ip, container_port))
+
+ if explain:
+ if container.forwards:
+ message = 'Port forwards for the "%s" container have been established on the %s host' % (container_name, host_type)
+ else:
+ message = 'Ports for the "%s" container are available on the %s host as' % (container_name, host_type)
+
+ messages.append('%s:\n%s' % (message, '\n'.join(explain)))
+
+ hosts_entries = create_hosts_entries(test_context)
+ inventory = generate_ssh_inventory(ssh_connections)
+
+ with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path:
+ run_playbook(args, inventory_path, playbook, dict(hosts_entries=hosts_entries))
+
+ ssh_processes = [] # type: t.List[SshProcess]
+
+ if redirects:
+ for ssh in ssh_connections:
+ ssh_processes.append(create_ssh_port_redirects(args, ssh, redirects))
+
+ target_state[target.name] = (hosts_entries, ssh_processes)
+
+ for message in messages:
+ display.info(message, verbosity=1)
+
+
+def cleanup_ssh_ports(
+ args, # type: IntegrationConfig
+ ssh_connections, # type: t.List[SshConnectionDetail]
+ playbook, # type: str
+ target_state, # type: t.Dict[str, t.Tuple[t.List[str], t.List[SshProcess]]]
+ target, # type: IntegrationTarget
+ host_type, # type: str
+): # type: (...) -> None
+ """Stop previously configured SSH port forwarding and remove previously written hosts file entries."""
+ state = target_state.pop(target.name, None)
+
+ if not state:
+ return
+
+ (hosts_entries, ssh_processes) = state
+
+ inventory = generate_ssh_inventory(ssh_connections)
+
+ with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path:
+ run_playbook(args, inventory_path, playbook, dict(hosts_entries=hosts_entries))
+
+ if ssh_processes:
+ for process in ssh_processes:
+ process.terminate()
+
+ display.info('Waiting for the %s host SSH port forwarding process(es) to terminate.' % host_type, verbosity=1)
+
+ for process in ssh_processes:
+ process.wait()
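[editor's note] Taken together, the module above is used in three steps: run_support_container() starts (or adopts) a container and records a ContainerDescriptor, register() inspects the running container for its IP and published ports, and support_container_context() exposes the resulting database while tests run. A minimal sketch of that sequence (image and names hypothetical):

    descriptor = run_support_container(
        args,                        # EnvironmentConfig
        'example',                   # context, e.g. the cloud platform name
        'quay.io/example/svc:1.0',   # image (hypothetical)
        'example-svc',               # container name
        [8080],                      # TCP ports tests need to reach
        allow_existing=True,         # reuse a matching container if present
        cleanup=True,                # docker rm at exit
    )
    descriptor.register(args)        # inspect the running container for IP/ports

    # later, around test execution, expose the containers to the test hosts:
    with support_container_context(args, ssh=None) as containers:
        if containers:
            display.info('host types: %s' % ', '.join(sorted(containers.data)))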
diff --git a/test/lib/ansible_test/_internal/core_ci.py b/test/lib/ansible_test/_internal/core_ci.py
index a7e070661c..025969ca8c 100644
--- a/test/lib/ansible_test/_internal/core_ci.py
+++ b/test/lib/ansible_test/_internal/core_ci.py
@@ -567,6 +567,9 @@ class SshKey:
if not os.path.isfile(key) or not os.path.isfile(pub):
run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', self.KEY_TYPE, '-N', '', '-f', key])
+ if args.explain:
+ return key, pub
+
# newer ssh-keygen PEM output (such as on RHEL 8.1) is not recognized by paramiko
key_contents = read_text_file(key)
key_contents = re.sub(r'(BEGIN|END) PRIVATE KEY', r'\1 RSA PRIVATE KEY', key_contents)
diff --git a/test/lib/ansible_test/_internal/delegation.py b/test/lib/ansible_test/_internal/delegation.py
index 250b9114af..692635d7c9 100644
--- a/test/lib/ansible_test/_internal/delegation.py
+++ b/test/lib/ansible_test/_internal/delegation.py
@@ -2,6 +2,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import json
import os
import re
import sys
@@ -16,11 +17,8 @@ from .io import (
from .executor import (
SUPPORTED_PYTHON_VERSIONS,
- HTTPTESTER_HOSTS,
create_shell_command,
- run_httptester,
run_pypi_proxy,
- start_httptester,
get_python_interpreter,
get_python_version,
)
@@ -69,24 +67,19 @@ from .util_common import (
from .docker_util import (
docker_exec,
docker_get,
+ docker_inspect,
docker_pull,
docker_put,
docker_rm,
docker_run,
- docker_available,
docker_network_disconnect,
- get_docker_networks,
- get_docker_preferred_network_name,
+ get_docker_command,
get_docker_hostname,
- is_docker_user_defined_network,
)
-from .cloud import (
- get_cloud_providers,
-)
-
-from .target import (
- IntegrationTarget,
+from .containers import (
+ SshConnectionDetail,
+ support_container_context,
)
from .data import (
@@ -119,12 +112,11 @@ def check_delegation_args(args):
get_python_version(args, get_remote_completion(), args.remote)
-def delegate(args, exclude, require, integration_targets):
+def delegate(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
- :type integration_targets: tuple[IntegrationTarget]
:rtype: bool
"""
if isinstance(args, TestConfig):
@@ -137,31 +129,30 @@ def delegate(args, exclude, require, integration_targets):
args.metadata.to_file(args.metadata_path)
try:
- return delegate_command(args, exclude, require, integration_targets)
+ return delegate_command(args, exclude, require)
finally:
args.metadata_path = None
else:
- return delegate_command(args, exclude, require, integration_targets)
+ return delegate_command(args, exclude, require)
-def delegate_command(args, exclude, require, integration_targets):
+def delegate_command(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
- :type integration_targets: tuple[IntegrationTarget]
:rtype: bool
"""
if args.venv:
- delegate_venv(args, exclude, require, integration_targets)
+ delegate_venv(args, exclude, require)
return True
if args.docker:
- delegate_docker(args, exclude, require, integration_targets)
+ delegate_docker(args, exclude, require)
return True
if args.remote:
- delegate_remote(args, exclude, require, integration_targets)
+ delegate_remote(args, exclude, require)
return True
return False
@@ -170,7 +161,6 @@ def delegate_command(args, exclude, require, integration_targets):
def delegate_venv(args, # type: EnvironmentConfig
exclude, # type: t.List[str]
require, # type: t.List[str]
- integration_targets, # type: t.Tuple[IntegrationTarget, ...]
): # type: (...) -> None
"""Delegate ansible-test execution to a virtual environment using venv or virtualenv."""
if args.python:
@@ -178,12 +168,6 @@ def delegate_venv(args, # type: EnvironmentConfig
else:
versions = SUPPORTED_PYTHON_VERSIONS
- if args.httptester:
- needs_httptester = sorted(target.name for target in integration_targets if 'needs/httptester/' in target.aliases)
-
- if needs_httptester:
- display.warning('Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s' % ', '.join(needs_httptester))
-
if args.venv_system_site_packages:
suffix = '-ssp'
else:
@@ -224,30 +208,26 @@ def delegate_venv(args, # type: EnvironmentConfig
PYTHONPATH=library_path,
)
- run_command(args, cmd, env=env)
+ with support_container_context(args, None) as containers:
+ if containers:
+ cmd.extend(['--containers', json.dumps(containers.to_dict())])
+
+ run_command(args, cmd, env=env)
-def delegate_docker(args, exclude, require, integration_targets):
+def delegate_docker(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
- :type integration_targets: tuple[IntegrationTarget]
"""
+ get_docker_command(required=True) # fail early if docker is not available
+
test_image = args.docker
privileged = args.docker_privileged
- if isinstance(args, ShellConfig):
- use_httptester = args.httptester
- else:
- use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
-
- if use_httptester:
- docker_pull(args, args.httptester)
-
docker_pull(args, test_image)
- httptester_id = None
test_id = None
success = False
@@ -295,11 +275,6 @@ def delegate_docker(args, exclude, require, integration_targets):
try:
create_payload(args, local_source_fd.name)
- if use_httptester:
- httptester_id = run_httptester(args)
- else:
- httptester_id = None
-
test_options = [
'--detach',
'--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro',
@@ -320,28 +295,7 @@ def delegate_docker(args, exclude, require, integration_targets):
if get_docker_hostname() != 'localhost' or os.path.exists(docker_socket):
test_options += ['--volume', '%s:%s' % (docker_socket, docker_socket)]
- if httptester_id:
- test_options += ['--env', 'HTTPTESTER=1', '--env', 'KRB5_PASSWORD=%s' % args.httptester_krb5_password]
-
- network = get_docker_preferred_network_name(args)
-
- if not is_docker_user_defined_network(network):
- # legacy links are required when using the default bridge network instead of user-defined networks
- for host in HTTPTESTER_HOSTS:
- test_options += ['--link', '%s:%s' % (httptester_id, host)]
-
- if isinstance(args, IntegrationConfig):
- cloud_platforms = get_cloud_providers(args)
-
- for cloud_platform in cloud_platforms:
- test_options += cloud_platform.get_docker_run_options()
-
- test_id = docker_run(args, test_image, options=test_options)[0]
-
- if args.explain:
- test_id = 'test_id'
- else:
- test_id = test_id.strip()
+ test_id = docker_run(args, test_image, options=test_options)
setup_sh = read_text_file(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'docker.sh'))
@@ -377,7 +331,8 @@ def delegate_docker(args, exclude, require, integration_targets):
docker_exec(args, test_id, cmd + ['--requirements-mode', 'only'], options=cmd_options)
- networks = get_docker_networks(args, test_id)
+ container = docker_inspect(args, test_id)
+ networks = container.get_network_names()
if networks is not None:
for network in networks:
@@ -391,7 +346,11 @@ def delegate_docker(args, exclude, require, integration_targets):
cmd_options += ['--user', 'pytest']
try:
- docker_exec(args, test_id, cmd, options=cmd_options)
+ with support_container_context(args, None) as containers:
+ if containers:
+ cmd.extend(['--containers', json.dumps(containers.to_dict())])
+
+ docker_exec(args, test_id, cmd, options=cmd_options)
# docker_exec will throw SubprocessError if not successful
# If we make it here, all the prep work earlier and the docker_exec line above were all successful.
success = True
@@ -402,16 +361,21 @@ def delegate_docker(args, exclude, require, integration_targets):
remote_results_name = os.path.basename(remote_results_root)
remote_temp_file = os.path.join('/root', remote_results_name + '.tgz')
- make_dirs(local_test_root) # make sure directory exists for collections which have no tests
+ try:
+ make_dirs(local_test_root) # make sure directory exists for collections which have no tests
- with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd:
- docker_exec(args, test_id, ['tar', 'czf', remote_temp_file, '--exclude', ResultType.TMP.name, '-C', remote_test_root, remote_results_name])
- docker_get(args, test_id, remote_temp_file, local_result_fd.name)
- run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', local_test_root])
- finally:
- if httptester_id:
- docker_rm(args, httptester_id)
+ with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd:
+ docker_exec(args, test_id, ['tar', 'czf', remote_temp_file, '--exclude', ResultType.TMP.name, '-C', remote_test_root,
+ remote_results_name])
+ docker_get(args, test_id, remote_temp_file, local_result_fd.name)
+ run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', local_test_root])
+ except Exception as ex: # pylint: disable=broad-except
+ if success:
+ raise # download errors are fatal, but only if tests succeeded
+ # handle download error here to avoid masking test failures
+ display.warning('Failed to download results while handling an exception: %s' % ex)
+ finally:
if pypi_proxy_id:
docker_rm(args, pypi_proxy_id)
@@ -420,42 +384,26 @@ def delegate_docker(args, exclude, require, integration_targets):
docker_rm(args, test_id)
-def delegate_remote(args, exclude, require, integration_targets):
+def delegate_remote(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
- :type integration_targets: tuple[IntegrationTarget]
"""
remote = args.parsed_remote
core_ci = AnsibleCoreCI(args, remote.platform, remote.version, stage=args.remote_stage, provider=args.remote_provider, arch=remote.arch)
success = False
- raw = False
-
- if isinstance(args, ShellConfig):
- use_httptester = args.httptester
- raw = args.raw
- else:
- use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
-
- if use_httptester and not docker_available():
- display.warning('Assuming --disable-httptester since `docker` is not available.')
- use_httptester = False
- httptester_id = None
ssh_options = []
content_root = None
try:
core_ci.start()
-
- if use_httptester:
- httptester_id, ssh_options = start_httptester(args)
-
core_ci.wait()
python_version = get_python_version(args, get_remote_completion(), args.remote)
+ python_interpreter = None
if remote.platform == 'windows':
# Windows doesn't need the ansible-test fluff, just run the SSH command
@@ -463,7 +411,7 @@ def delegate_remote(args, exclude, require, integration_targets):
manage.setup(python_version)
cmd = ['powershell.exe']
- elif raw:
+ elif isinstance(args, ShellConfig) and args.raw:
manage = ManagePosixCI(core_ci)
manage.setup(python_version)
@@ -487,9 +435,6 @@ def delegate_remote(args, exclude, require, integration_targets):
cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require)
- if httptester_id:
- cmd += ['--inject-httptester', '--httptester-krb5-password', args.httptester_krb5_password]
-
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
cmd += ['--coverage-label', 'remote-%s-%s' % (remote.platform, remote.version)]
@@ -502,14 +447,16 @@ def delegate_remote(args, exclude, require, integration_targets):
if isinstance(args, UnitsConfig) and not args.python:
cmd += ['--python', 'default']
- if isinstance(args, IntegrationConfig):
- cloud_platforms = get_cloud_providers(args)
+ try:
+ ssh_con = core_ci.connection
+ ssh = SshConnectionDetail(core_ci.name, ssh_con.hostname, ssh_con.port, ssh_con.username, core_ci.ssh_key.key, python_interpreter)
- for cloud_platform in cloud_platforms:
- ssh_options += cloud_platform.get_remote_ssh_options()
+ with support_container_context(args, ssh) as containers:
+ if containers:
+ cmd.extend(['--containers', json.dumps(containers.to_dict())])
+
+ manage.ssh(cmd, ssh_options)
- try:
- manage.ssh(cmd, ssh_options)
success = True
finally:
download = False
@@ -532,15 +479,21 @@ def delegate_remote(args, exclude, require, integration_targets):
# pattern and achieve the same goal
cp_opts = '-hr' if remote.platform in ['aix', 'ibmi'] else '-a'
- manage.ssh('rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root))
- manage.download(remote_temp_path, local_test_root)
+ try:
+ command = 'rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root)
+
+ manage.ssh(command, capture=True) # pylint: disable=unexpected-keyword-arg
+ manage.download(remote_temp_path, local_test_root)
+ except Exception as ex: # pylint: disable=broad-except
+ if success:
+ raise # download errors are fatal, but only if tests succeeded
+
+ # handle download error here to avoid masking test failures
+ display.warning('Failed to download results while handling an exception: %s' % ex)
finally:
if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
core_ci.stop()
- if httptester_id:
- docker_rm(args, httptester_id)
-
def generate_command(args, python_interpreter, ansible_bin_path, content_root, options, exclude, require):
"""
diff --git a/test/lib/ansible_test/_internal/docker_util.py b/test/lib/ansible_test/_internal/docker_util.py
index 3ad771bd41..75893e5dd1 100644
--- a/test/lib/ansible_test/_internal/docker_util.py
+++ b/test/lib/ansible_test/_internal/docker_util.py
@@ -4,6 +4,8 @@ __metaclass__ = type
import json
import os
+import random
+import socket
import time
from . import types as t
@@ -27,6 +29,7 @@ from .http import (
from .util_common import (
run_command,
+ raw_command,
)
from .config import (
@@ -35,12 +38,68 @@ from .config import (
BUFFER_SIZE = 256 * 256
+DOCKER_COMMANDS = [
+ 'docker',
+ 'podman',
+]
-def docker_available():
- """
- :rtype: bool
- """
- return find_executable('docker', required=False)
+
+class DockerCommand:
+ """Details about the available docker command."""
+ def __init__(self, command, executable, version): # type: (str, str, str) -> None
+ self.command = command
+ self.executable = executable
+ self.version = version
+
+ @staticmethod
+ def detect(): # type: () -> t.Optional[DockerCommand]
+ """Detect and return the available docker command, or None."""
+ if os.environ.get('ANSIBLE_TEST_PREFER_PODMAN'):
+ commands = list(reversed(DOCKER_COMMANDS))
+ else:
+ commands = DOCKER_COMMANDS
+
+ for command in commands:
+ executable = find_executable(command, required=False)
+
+ if executable:
+ version = raw_command([command, '-v'], capture=True)[0].strip()
+
+ if command == 'docker' and 'podman' in version:
+ continue # avoid detecting podman as docker
+
+ display.info('Detected "%s" container runtime version: %s' % (command, version), verbosity=1)
+
+ return DockerCommand(command, executable, version)
+
+ return None
+
+
+def get_docker_command(required=False): # type: (bool) -> t.Optional[DockerCommand]
+ """Return the docker command to invoke. Raises an exception if docker is not available."""
+ try:
+ return get_docker_command.cmd
+ except AttributeError:
+ get_docker_command.cmd = DockerCommand.detect()
+
+ if required and not get_docker_command.cmd:
+ raise ApplicationError("No container runtime detected. Supported commands: %s" % ', '.join(DOCKER_COMMANDS))
+
+ return get_docker_command.cmd
+
+
+def get_docker_host_ip(): # type: () -> str
+ """Return the IP of the Docker host."""
+ try:
+ return get_docker_host_ip.ip
+ except AttributeError:
+ pass
+
+ docker_host_ip = get_docker_host_ip.ip = socket.gethostbyname(get_docker_hostname())
+
+ display.info('Detected docker host IP: %s' % docker_host_ip, verbosity=1)
+
+ return docker_host_ip
def get_docker_hostname(): # type: () -> str
@@ -101,45 +160,6 @@ def get_docker_container_id():
return container_id
-def get_docker_container_ip(args, container_id):
- """
- :type args: EnvironmentConfig
- :type container_id: str
- :rtype: str
- """
- results = docker_inspect(args, container_id)
- network_settings = results[0]['NetworkSettings']
- networks = network_settings.get('Networks')
-
- if networks:
- network_name = get_docker_preferred_network_name(args) or 'bridge'
- ipaddress = networks[network_name]['IPAddress']
- else:
- # podman doesn't provide Networks, fall back to using IPAddress
- ipaddress = network_settings['IPAddress']
-
- if not ipaddress:
- raise ApplicationError('Cannot retrieve IP address for container: %s' % container_id)
-
- return ipaddress
-
-
-def get_docker_network_name(args, container_id): # type: (EnvironmentConfig, str) -> str
- """
- Return the network name of the specified container.
- Raises an exception if zero or more than one network is found.
- """
- networks = get_docker_networks(args, container_id)
-
- if not networks:
- raise ApplicationError('No network found for Docker container: %s.' % container_id)
-
- if len(networks) > 1:
- raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (container_id, ', '.join(networks)))
-
- return networks[0]
-
-
def get_docker_preferred_network_name(args): # type: (EnvironmentConfig) -> str
"""
Return the preferred network name for use with Docker. The selection logic is:
@@ -147,6 +167,11 @@ def get_docker_preferred_network_name(args): # type: (EnvironmentConfig) -> str
- the network of the currently running docker container (if any)
- the default docker network (returns None)
"""
+ try:
+ return get_docker_preferred_network_name.network
+ except AttributeError:
+ pass
+
network = None
if args.docker_network:
@@ -157,7 +182,10 @@ def get_docker_preferred_network_name(args): # type: (EnvironmentConfig) -> str
if current_container_id:
# Make sure any additional containers we launch use the same network as the current container we're running in.
# This is needed when ansible-test is running in a container that is not connected to Docker's default network.
- network = get_docker_network_name(args, current_container_id)
+ container = docker_inspect(args, current_container_id, always=True)
+ network = container.get_network_name()
+
+ get_docker_preferred_network_name.network = network
return network
@@ -167,26 +195,12 @@ def is_docker_user_defined_network(network): # type: (str) -> bool
return network and network != 'bridge'
-def get_docker_networks(args, container_id):
- """
- :param args: EnvironmentConfig
- :param container_id: str
- :rtype: list[str]
- """
- results = docker_inspect(args, container_id)
- # podman doesn't return Networks- just silently return None if it's missing...
- networks = results[0]['NetworkSettings'].get('Networks')
- if networks is None:
- return None
- return sorted(networks)
-
-
def docker_pull(args, image):
"""
:type args: EnvironmentConfig
:type image: str
"""
- if ('@' in image or ':' in image) and docker_images(args, image):
+ if ('@' in image or ':' in image) and docker_image_exists(args, image):
display.info('Skipping docker pull of existing image with tag or digest: %s' % image, verbosity=2)
return
@@ -205,6 +219,11 @@ def docker_pull(args, image):
raise ApplicationError('Failed to pull docker image "%s".' % image)
+def docker_cp_to(args, container_id, src, dst): # type: (EnvironmentConfig, str, str, str) -> None
+ """Copy a file to the specified container."""
+ docker_command(args, ['cp', src, '%s:%s' % (container_id, dst)])
+
+
def docker_put(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
@@ -238,7 +257,7 @@ def docker_run(args, image, options, cmd=None, create_only=False):
:type options: list[str] | None
:type cmd: list[str] | None
:type create_only[bool] | False
- :rtype: str | None, str | None
+ :rtype: str
"""
if not options:
options = []
@@ -255,12 +274,16 @@ def docker_run(args, image, options, cmd=None, create_only=False):
if is_docker_user_defined_network(network):
# Only when the network is not the default bridge network.
- # Using this with the default bridge network results in an error when using --link: links are only supported for user-defined networks
options.extend(['--network', network])
for _iteration in range(1, 3):
try:
- return docker_command(args, [command] + options + [image] + cmd, capture=True)
+ stdout = docker_command(args, [command] + options + [image] + cmd, capture=True)[0]
+
+ if args.explain:
+ return ''.join(random.choice('0123456789abcdef') for _iteration in range(64))
+
+ return stdout.strip()
except SubprocessError as ex:
display.error(ex)
display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
@@ -269,7 +292,7 @@ def docker_run(args, image, options, cmd=None, create_only=False):
raise ApplicationError('Failed to run docker image "%s".' % image)
-def docker_start(args, container_id, options): # type: (EnvironmentConfig, str, t.List[str]) -> (t.Optional[str], t.Optional[str])
+def docker_start(args, container_id, options=None): # type: (EnvironmentConfig, str, t.Optional[t.List[str]]) -> (t.Optional[str], t.Optional[str])
"""
Start a docker container by name or ID
"""
@@ -287,33 +310,6 @@ def docker_start(args, container_id, options): # type: (EnvironmentConfig, str,
raise ApplicationError('Failed to run docker container "%s".' % container_id)
-def docker_images(args, image):
- """
- :param args: CommonConfig
- :param image: str
- :rtype: list[dict[str, any]]
- """
- try:
- stdout, _dummy = docker_command(args, ['images', image, '--format', '{{json .}}'], capture=True, always=True)
- except SubprocessError as ex:
- if 'no such image' in ex.stderr:
- return [] # podman does not handle this gracefully, exits 125
-
- if 'function "json" not defined' in ex.stderr:
- # podman > 2 && < 2.2.0 breaks with --format {{json .}}, and requires --format json
- # So we try this as a fallback. If it fails again, we just raise the exception and bail.
- stdout, _dummy = docker_command(args, ['images', image, '--format', 'json'], capture=True, always=True)
- else:
- raise ex
-
- if stdout.startswith('['):
- # modern podman outputs a pretty-printed json list. Just load the whole thing.
- return json.loads(stdout)
-
- # docker outputs one json object per line (jsonl)
- return [json.loads(line) for line in stdout.splitlines()]
-
-
def docker_rm(args, container_id):
"""
:type args: EnvironmentConfig
@@ -328,25 +324,135 @@ def docker_rm(args, container_id):
raise ex
-def docker_inspect(args, container_id):
+class DockerError(Exception):
+ """General Docker error."""
+
+
+class ContainerNotFoundError(DockerError):
+ """The container identified by `identifier` was not found."""
+ def __init__(self, identifier):
+ super(ContainerNotFoundError, self).__init__('The container "%s" was not found.' % identifier)
+
+ self.identifier = identifier
+
+
+class DockerInspect:
+ """The results of `docker inspect` for a single container."""
+ def __init__(self, args, inspection): # type: (EnvironmentConfig, t.Dict[str, t.Any]) -> None
+ self.args = args
+ self.inspection = inspection
+
+ # primary properties
+
+ @property
+ def id(self): # type: () -> str
+ """Return the ID of the container."""
+ return self.inspection['Id']
+
+ @property
+ def network_settings(self): # type: () -> t.Dict[str, t.Any]
+ """Return a dictionary of the container network settings."""
+ return self.inspection['NetworkSettings']
+
+ @property
+ def state(self): # type: () -> t.Dict[str, t.Any]
+ """Return a dictionary of the container state."""
+ return self.inspection['State']
+
+ @property
+ def config(self): # type: () -> t.Dict[str, t.Any]
+ """Return a dictionary of the container configuration."""
+ return self.inspection['Config']
+
+ # nested properties
+
+ @property
+ def ports(self): # type: () -> t.Dict[str, t.List[t.Dict[str, str]]]
+ """Return a dictionary of ports the container has published."""
+ return self.network_settings['Ports']
+
+ @property
+ def networks(self): # type: () -> t.Optional[t.Dict[str, t.Dict[str, t.Any]]]
+ """Return a dictionary of the networks the container is attached to, or None if running under podman, which does not support networks."""
+ return self.network_settings.get('Networks')
+
+ @property
+ def running(self): # type: () -> bool
+ """Return True if the container is running, otherwise False."""
+ return self.state['Running']
+
+ @property
+ def env(self): # type: () -> t.List[str]
+ """Return a list of the environment variables used to create the container."""
+ return self.config['Env']
+
+ @property
+ def image(self): # type: () -> str
+ """Return the image used to create the container."""
+ return self.config['Image']
+
+ # functions
+
+ def env_dict(self): # type: () -> t.Dict[str, str]
+ """Return a dictionary of the environment variables used to create the container."""
+ return dict((item[0], item[1]) for item in [e.split('=', 1) for e in self.env])
+
+ def get_tcp_port(self, port): # type: (int) -> t.Optional[t.List[t.Dict[str, str]]]
+ """Return a list of the endpoints published by the container for the specified TCP port, or None if it is not published."""
+ return self.ports.get('%d/tcp' % port)
+
+ def get_network_names(self): # type: () -> t.Optional[t.List[str]]
+ """Return a list of the network names the container is attached to."""
+ if self.networks is None:
+ return None
+
+ return sorted(self.networks)
+
+ def get_network_name(self): # type: () -> str
+ """Return the network name the container is attached to. Raises an exception if no network, or more than one, is attached."""
+ networks = self.get_network_names()
+
+ if not networks:
+ raise ApplicationError('No network found for Docker container: %s.' % self.id)
+
+ if len(networks) > 1:
+ raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (self.id, ', '.join(networks)))
+
+ return networks[0]
+
+ def get_ip_address(self): # type: () -> t.Optional[str]
+ """Return the IP address of the container for the preferred docker network."""
+ if self.networks:
+ network_name = get_docker_preferred_network_name(self.args) or 'bridge'
+ ipaddress = self.networks[network_name]['IPAddress']
+ else:
+ ipaddress = self.network_settings['IPAddress']
+
+ if not ipaddress:
+ return None
+
+ return ipaddress
+
+
+def docker_inspect(args, identifier, always=False): # type: (EnvironmentConfig, str, bool) -> DockerInspect
"""
- :type args: EnvironmentConfig
- :type container_id: str
- :rtype: list[dict]
+ Return the results of `docker inspect` for the specified container.
+ Raises a ContainerNotFoundError if the container was not found.
"""
- if args.explain:
- return []
-
try:
- stdout = docker_command(args, ['inspect', container_id], capture=True)[0]
- return json.loads(stdout)
+ stdout = docker_command(args, ['inspect', identifier], capture=True, always=always)[0]
except SubprocessError as ex:
- if 'no such image' in ex.stderr:
- return [] # podman does not handle this gracefully, exits 125
- try:
- return json.loads(ex.stdout)
- except Exception:
- raise ex
+ stdout = ex.stdout
+
+ if args.explain and not always:
+ items = []
+ else:
+ items = json.loads(stdout)
+
+ if len(items) == 1:
+ return DockerInspect(args, items[0])
+
+ raise ContainerNotFoundError(identifier)
def docker_network_disconnect(args, container_id, network):
@@ -358,6 +464,16 @@ def docker_network_disconnect(args, container_id, network):
docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
+def docker_image_exists(args, image): # type: (EnvironmentConfig, str) -> bool
+ """Return True if the image exists, otherwise False."""
+ try:
+ docker_command(args, ['image', 'inspect', image], capture=True)
+ except SubprocessError:
+ return False
+
+ return True
+
+
def docker_network_inspect(args, network):
"""
:type args: EnvironmentConfig
@@ -428,7 +544,8 @@ def docker_command(args, cmd, capture=False, stdin=None, stdout=None, always=Fal
:rtype: str | None, str | None
"""
env = docker_environment()
- return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always, data=data)
+ command = get_docker_command(required=True).command
+ return run_command(args, [command] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always, data=data)
def docker_environment():
diff --git a/test/lib/ansible_test/_internal/env.py b/test/lib/ansible_test/_internal/env.py
index 60c0245e08..ef04c692fc 100644
--- a/test/lib/ansible_test/_internal/env.py
+++ b/test/lib/ansible_test/_internal/env.py
@@ -22,7 +22,6 @@ from .io import (
from .util import (
display,
- find_executable,
SubprocessError,
ApplicationError,
get_ansible_version,
@@ -36,6 +35,7 @@ from .util_common import (
)
from .docker_util import (
+ get_docker_command,
docker_info,
docker_version
)
@@ -269,11 +269,15 @@ def get_docker_details(args):
:type args: CommonConfig
:rtype: dict[str, any]
"""
- docker = find_executable('docker', required=False)
+ docker = get_docker_command()
+
+ executable = None
info = None
version = None
if docker:
+ executable = docker.executable
+
try:
info = docker_info(args)
except SubprocessError as ex:
@@ -285,7 +289,7 @@ def get_docker_details(args):
display.warning('Failed to collect docker version:\n%s' % ex)
docker_details = dict(
- executable=docker,
+ executable=executable,
info=info,
version=version,
)
diff --git a/test/lib/ansible_test/_internal/executor.py b/test/lib/ansible_test/_internal/executor.py
index c3755a7113..8047f1452d 100644
--- a/test/lib/ansible_test/_internal/executor.py
+++ b/test/lib/ansible_test/_internal/executor.py
@@ -56,14 +56,11 @@ from .util import (
remove_tree,
find_executable,
raw_command,
- get_available_port,
generate_pip_command,
find_python,
cmd_quote,
- ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_DATA_ROOT,
ANSIBLE_TEST_CONFIG_ROOT,
- get_ansible_version,
tempdir,
open_zipfile,
SUPPORTED_PYTHON_VERSIONS,
@@ -88,18 +85,18 @@ from .util_common import (
from .docker_util import (
docker_pull,
docker_run,
- docker_available,
- docker_rm,
- get_docker_container_id,
- get_docker_container_ip,
- get_docker_hostname,
- get_docker_preferred_network_name,
- is_docker_user_defined_network,
+ docker_inspect,
+)
+
+from .containers import (
+ SshConnectionDetail,
+ create_container_hooks,
)
from .ansible_util import (
ansible_environment,
check_pyyaml,
+ run_playbook,
)
from .target import (
@@ -153,13 +150,6 @@ from .http import (
urlparse,
)
-HTTPTESTER_HOSTS = (
- 'ansible.http.tests',
- 'sni1.ansible.http.tests',
- 'fail.ansible.http.tests',
- 'self-signed.ansible.http.tests',
-)
-
def check_startup():
"""Checks to perform at startup before running commands."""
@@ -514,9 +504,6 @@ def command_shell(args):
install_command_requirements(args)
- if args.inject_httptester:
- inject_httptester(args)
-
cmd = create_shell_command(['bash', '-i'])
run_command(args, cmd)
@@ -532,7 +519,12 @@ def command_posix_integration(args):
all_targets = tuple(walk_posix_integration_targets(include_hidden=True))
internal_targets = command_integration_filter(args, all_targets)
- command_integration_filtered(args, internal_targets, all_targets, inventory_path)
+
+ managed_connections = None # type: t.Optional[t.List[SshConnectionDetail]]
+
+ pre_target, post_target = create_container_hooks(args, managed_connections)
+
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path, pre_target=pre_target, post_target=post_target)
def command_network_integration(args):
@@ -749,9 +741,7 @@ def command_windows_integration(args):
all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
internal_targets = command_integration_filter(args, all_targets, init_callback=windows_init)
instances = [] # type: t.List[WrappedThread]
- pre_target = None
- post_target = None
- httptester_id = None
+ managed_connections = [] # type: t.List[SshConnectionDetail]
if args.windows:
get_python_path(args, args.python_executable) # initialize before starting threads
@@ -777,76 +767,41 @@ def command_windows_integration(args):
if not args.explain:
write_text_file(inventory_path, inventory)
- use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in internal_targets)
- # if running under Docker delegation, the httptester may have already been started
- docker_httptester = bool(os.environ.get("HTTPTESTER", False))
-
- if use_httptester and not docker_available() and not docker_httptester:
- display.warning('Assuming --disable-httptester since `docker` is not available.')
- elif use_httptester:
- if docker_httptester:
- # we are running in a Docker container that is linked to the httptester container, we just need to
- # forward these requests to the linked hostname
- first_host = HTTPTESTER_HOSTS[0]
- ssh_options = [
- "-R", "8080:%s:80" % first_host,
- "-R", "8443:%s:443" % first_host,
- "-R", "8444:%s:444" % first_host
- ]
- else:
- # we are running directly and need to start the httptester container ourselves and forward the port
- # from there manually set so HTTPTESTER env var is set during the run
- args.inject_httptester = True
- httptester_id, ssh_options = start_httptester(args)
-
- # to get this SSH command to run in the background we need to set to run in background (-f) and disable
- # the pty allocation (-T)
- ssh_options.insert(0, "-fT")
-
- # create a script that will continue to run in the background until the script is deleted, this will
- # cleanup and close the connection
- def forward_ssh_ports(target):
- """
- :type target: IntegrationTarget
- """
- if 'needs/httptester/' not in target.aliases:
- return
-
- for remote in [r for r in remotes if r.version != '2008']:
- manage = ManageWindowsCI(remote)
- manage.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'windows-httptester.ps1'), watcher_path)
-
- # We cannot pass an array of string with -File so we just use a delimiter for multiple values
- script = "powershell.exe -NoProfile -ExecutionPolicy Bypass -File .\\%s -Hosts \"%s\"" \
- % (watcher_path, "|".join(HTTPTESTER_HOSTS))
- if args.verbosity > 3:
- script += " -Verbose"
- manage.ssh(script, options=ssh_options, force_pty=False)
-
- def cleanup_ssh_ports(target):
- """
- :type target: IntegrationTarget
- """
- if 'needs/httptester/' not in target.aliases:
- return
-
- for remote in [r for r in remotes if r.version != '2008']:
- # delete the tmp file that keeps the http-tester alive
- manage = ManageWindowsCI(remote)
- manage.ssh("cmd.exe /c \"del %s /F /Q\"" % watcher_path, force_pty=False)
-
- watcher_path = "ansible-test-http-watcher-%s.ps1" % time.time()
- pre_target = forward_ssh_ports
- post_target = cleanup_ssh_ports
-
- def run_playbook(playbook, run_playbook_vars): # type: (str, t.Dict[str, t.Any]) -> None
- playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
- command = ['ansible-playbook', '-i', inventory_path, playbook_path, '-e', json.dumps(run_playbook_vars)]
- if args.verbosity:
- command.append('-%s' % ('v' * args.verbosity))
+ for core_ci in remotes:
+ ssh_con = core_ci.connection
+ ssh = SshConnectionDetail(core_ci.name, ssh_con.hostname, 22, ssh_con.username, core_ci.ssh_key.key, shell_type='powershell')
+ managed_connections.append(ssh)
+ elif args.explain:
+ identity_file = SshKey(args).key
+
+ # mock connection details to prevent tracebacks in explain mode
+ managed_connections = [SshConnectionDetail(
+ name='windows',
+ host='windows',
+ port=22,
+ user='administrator',
+ identity_file=identity_file,
+ shell_type='powershell',
+ )]
+ else:
+ inventory = parse_inventory(args, inventory_path)
+ hosts = get_hosts(inventory, 'windows')
+ identity_file = SshKey(args).key
+
+ managed_connections = [SshConnectionDetail(
+ name=name,
+ host=config['ansible_host'],
+ port=22,
+ user=config['ansible_user'],
+ identity_file=identity_file,
+ shell_type='powershell',
+ ) for name, config in hosts.items()]
- env = ansible_environment(args)
- intercept_command(args, command, '', env, disable_coverage=True)
+ if managed_connections:
+ display.info('Generated SSH connection details from inventory:\n%s' % (
+ '\n'.join('%s %s@%s:%d' % (ssh.name, ssh.user, ssh.host, ssh.port) for ssh in managed_connections)), verbosity=1)
+
+ pre_target, post_target = create_container_hooks(args, managed_connections)
remote_temp_path = None
@@ -854,7 +809,7 @@ def command_windows_integration(args):
# Create the remote directory that is writable by everyone. Use Ansible to talk to the remote host.
remote_temp_path = 'C:\\ansible_test_coverage_%s' % time.time()
playbook_vars = {'remote_temp_path': remote_temp_path}
- run_playbook('windows_coverage_setup.yml', playbook_vars)
+ run_playbook(args, inventory_path, 'windows_coverage_setup.yml', playbook_vars)
success = False
@@ -863,14 +818,11 @@ def command_windows_integration(args):
post_target=post_target, remote_temp_path=remote_temp_path)
success = True
finally:
- if httptester_id:
- docker_rm(args, httptester_id)
-
if remote_temp_path:
# Zip up the coverage files that were generated and fetch it back to localhost.
with tempdir() as local_temp_path:
playbook_vars = {'remote_temp_path': remote_temp_path, 'local_temp_path': local_temp_path}
- run_playbook('windows_coverage_teardown.yml', playbook_vars)
+ run_playbook(args, inventory_path, 'windows_coverage_teardown.yml', playbook_vars)
for filename in os.listdir(local_temp_path):
with open_zipfile(os.path.join(local_temp_path, filename)) as coverage_zip:
@@ -887,6 +839,9 @@ def windows_init(args, internal_targets): # pylint: disable=locally-disabled, u
:type args: WindowsIntegrationConfig
:type internal_targets: tuple[IntegrationTarget]
"""
+ # generate an SSH key (if needed) once up front, instead of once per instance
+ SshKey(args)
+
if not args.windows:
return
@@ -955,14 +910,7 @@ def windows_inventory(remotes):
if remote.ssh_key:
options["ansible_ssh_private_key_file"] = os.path.abspath(remote.ssh_key.key)
- if remote.name == 'windows-2008':
- options.update(
- # force 2008 to use PSRP for the connection plugin
- ansible_connection='psrp',
- ansible_psrp_auth='basic',
- ansible_psrp_cert_validation='ignore',
- )
- elif remote.name == 'windows-2016':
+ if remote.name == 'windows-2016':
options.update(
# force 2016 to use NTLM + HTTP message encryption
ansible_connection='winrm',
@@ -1053,24 +1001,23 @@ def command_integration_filter(args, # type: TIntegrationConfig
data_context().register_payload_callback(integration_config_callback)
if args.delegate:
- raise Delegate(require=require, exclude=exclude, integration_targets=internal_targets)
+ raise Delegate(require=require, exclude=exclude)
install_command_requirements(args)
return internal_targets
-def command_integration_filtered(args, targets, all_targets, inventory_path, pre_target=None, post_target=None,
- remote_temp_path=None):
- """
- :type args: IntegrationConfig
- :type targets: tuple[IntegrationTarget]
- :type all_targets: tuple[IntegrationTarget]
- :type inventory_path: str
- :type pre_target: (IntegrationTarget) -> None | None
- :type post_target: (IntegrationTarget) -> None | None
- :type remote_temp_path: str | None
- """
+def command_integration_filtered(
+ args, # type: IntegrationConfig
+ targets, # type: t.Tuple[IntegrationTarget, ...]
+ all_targets, # type: t.Tuple[IntegrationTarget, ...]
+ inventory_path, # type: str
+ pre_target=None, # type: t.Optional[t.Callable[[IntegrationTarget], None]]
+ post_target=None, # type: t.Optional[t.Callable[[IntegrationTarget], None]]
+ remote_temp_path=None, # type: t.Optional[str]
+):
+ """Run integration tests for the specified targets."""
found = False
passed = []
failed = []
@@ -1108,10 +1055,6 @@ def command_integration_filtered(args, targets, all_targets, inventory_path, pre
display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
time.sleep(seconds)
- # Windows is different as Ansible execution is done locally but the host is remote
- if args.inject_httptester and not isinstance(args, WindowsIntegrationConfig):
- inject_httptester(args)
-
start_at_task = args.start_at_task
results = {}
@@ -1158,6 +1101,9 @@ def command_integration_filtered(args, targets, all_targets, inventory_path, pre
start_time = time.time()
+ if pre_target:
+ pre_target(target)
+
run_setup_targets(args, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, True)
if not args.explain:
@@ -1165,9 +1111,6 @@ def command_integration_filtered(args, targets, all_targets, inventory_path, pre
remove_tree(test_dir)
make_dirs(test_dir)
- if pre_target:
- pre_target(target)
-
try:
if target.script_path:
command_integration_script(args, target, test_dir, inventory_path, common_temp_path,
@@ -1261,155 +1204,21 @@ def command_integration_filtered(args, targets, all_targets, inventory_path, pre
len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
-def start_httptester(args):
- """
- :type args: EnvironmentConfig
- :rtype: str, list[str]
- """
-
- # map ports from remote -> localhost -> container
- # passing through localhost is only used when ansible-test is not already running inside a docker container
- ports = [
- dict(
- remote=8080,
- container=80,
- ),
- dict(
- remote=8088,
- container=88,
- ),
- dict(
- remote=8443,
- container=443,
- ),
- dict(
- remote=8444,
- container=444,
- ),
- dict(
- remote=8749,
- container=749,
- ),
- ]
-
- container_id = get_docker_container_id()
-
- if not container_id:
- for item in ports:
- item['localhost'] = get_available_port()
-
- docker_pull(args, args.httptester)
-
- httptester_id = run_httptester(args, dict((port['localhost'], port['container']) for port in ports if 'localhost' in port))
-
- if container_id:
- container_host = get_docker_container_ip(args, httptester_id)
- display.info('Found httptester container address: %s' % container_host, verbosity=1)
- else:
- container_host = get_docker_hostname()
-
- ssh_options = []
-
- for port in ports:
- ssh_options += ['-R', '%d:%s:%d' % (port['remote'], container_host, port.get('localhost', port['container']))]
-
- return httptester_id, ssh_options
-
-
-def run_httptester(args, ports=None):
- """
- :type args: EnvironmentConfig
- :type ports: dict[int, int] | None
- :rtype: str
- """
- options = [
- '--detach',
- '--env', 'KRB5_PASSWORD=%s' % args.httptester_krb5_password,
- ]
-
- if ports:
- for localhost_port, container_port in ports.items():
- options += ['-p', '%d:%d' % (localhost_port, container_port)]
-
- network = get_docker_preferred_network_name(args)
-
- if is_docker_user_defined_network(network):
- # network-scoped aliases are only supported for containers in user defined networks
- for alias in HTTPTESTER_HOSTS:
- options.extend(['--network-alias', alias])
-
- httptester_id = docker_run(args, args.httptester, options=options)[0]
-
- if args.explain:
- httptester_id = 'httptester_id'
- else:
- httptester_id = httptester_id.strip()
-
- return httptester_id
-
-
-def inject_httptester(args):
- """
- :type args: CommonConfig
- """
- comment = ' # ansible-test httptester\n'
- append_lines = ['127.0.0.1 %s%s' % (host, comment) for host in HTTPTESTER_HOSTS]
- hosts_path = '/etc/hosts'
-
- original_lines = read_text_file(hosts_path).splitlines(True)
-
- if not any(line.endswith(comment) for line in original_lines):
- write_text_file(hosts_path, ''.join(original_lines + append_lines))
-
- # determine which forwarding mechanism to use
- pfctl = find_executable('pfctl', required=False)
- iptables = find_executable('iptables', required=False)
-
- if pfctl:
- kldload = find_executable('kldload', required=False)
-
- if kldload:
- try:
- run_command(args, ['kldload', 'pf'], capture=True)
- except SubprocessError:
- pass # already loaded
-
- rules = '''
-rdr pass inet proto tcp from any to any port 80 -> 127.0.0.1 port 8080
-rdr pass inet proto tcp from any to any port 88 -> 127.0.0.1 port 8088
-rdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8443
-rdr pass inet proto tcp from any to any port 444 -> 127.0.0.1 port 8444
-rdr pass inet proto tcp from any to any port 749 -> 127.0.0.1 port 8749
-'''
- cmd = ['pfctl', '-ef', '-']
-
- try:
- run_command(args, cmd, capture=True, data=rules)
- except SubprocessError:
- pass # non-zero exit status on success
-
- elif iptables:
- ports = [
- (80, 8080),
- (88, 8088),
- (443, 8443),
- (444, 8444),
- (749, 8749),
- ]
+def parse_inventory(args, inventory_path): # type: (IntegrationConfig, str) -> t.Dict[str, t.Any]
+ """Return a dict parsed from the given inventory file."""
+ cmd = ['ansible-inventory', '-i', inventory_path, '--list']
+ env = ansible_environment(args)
+ inventory = json.loads(intercept_command(args, cmd, '', env, capture=True, disable_coverage=True)[0])
+ return inventory
- for src, dst in ports:
- rule = ['-o', 'lo', '-p', 'tcp', '--dport', str(src), '-j', 'REDIRECT', '--to-port', str(dst)]
- try:
- # check for existing rule
- cmd = ['iptables', '-t', 'nat', '-C', 'OUTPUT'] + rule
- run_command(args, cmd, capture=True)
- except SubprocessError:
- # append rule when it does not exist
- cmd = ['iptables', '-t', 'nat', '-A', 'OUTPUT'] + rule
- run_command(args, cmd, capture=True)
- else:
- raise ApplicationError('No supported port forwarding mechanism detected.')
+def get_hosts(inventory, group_name): # type: (t.Dict[str, t.Any], str) -> t.Dict[str, t.Dict[str, t.Any]]
+ """Return a dict of hosts from the specified group in the given inventory."""
+ hostvars = inventory.get('_meta', {}).get('hostvars', {})
+ group = inventory.get(group_name, {})
+ host_names = group.get('hosts', [])
+ hosts = dict((name, hostvars[name]) for name in host_names)
+ return hosts
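+
+
+# Illustrative sketch of the `ansible-inventory --list` JSON these helpers consume
+# (host names and variables are hypothetical):
+#
+#   {
+#       "_meta": {"hostvars": {"win1": {"ansible_host": "10.0.0.5", "ansible_user": "administrator"}}},
+#       "windows": {"hosts": ["win1"]}
+#   }
+#
+# get_hosts(inventory, 'windows') would then return {'win1': {'ansible_host': '10.0.0.5', 'ansible_user': 'administrator'}}.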
def run_pypi_proxy(args): # type: (EnvironmentConfig) -> t.Tuple[t.Optional[str], t.Optional[str]]
@@ -1441,14 +1250,14 @@ def run_pypi_proxy(args): # type: (EnvironmentConfig) -> t.Tuple[t.Optional[str
docker_pull(args, proxy_image)
- container_id = docker_run(args, proxy_image, options=options)[0]
+ container_id = docker_run(args, proxy_image, options=options)
- if args.explain:
- container_id = 'pypi_id'
- container_ip = '127.0.0.1'
- else:
- container_id = container_id.strip()
- container_ip = get_docker_container_ip(args, container_id)
+ container = docker_inspect(args, container_id)
+
+ container_ip = container.get_ip_address()
+
+ if not container_ip:
+ raise ApplicationError('PyPI container IP not available.')
endpoint = 'http://%s:%d/root/pypi/+simple/' % (container_ip, port)
@@ -1586,12 +1395,6 @@ def integration_environment(args, target, test_dir, inventory_path, ansible_conf
"""
env = ansible_environment(args, ansible_config=ansible_config)
- if args.inject_httptester:
- env.update(dict(
- HTTPTESTER='1',
- KRB5_PASSWORD=args.httptester_krb5_password,
- ))
-
callback_plugins = ['junit'] + (env_config.callback_plugins or [] if env_config else [])
integration = dict(
@@ -1636,6 +1439,14 @@ def command_integration_script(args, target, test_dir, inventory_path, temp_path
if cloud_environment:
env_config = cloud_environment.get_environment_config()
+ if env_config:
+ display.info('>>> Environment Config\n%s' % json.dumps(dict(
+ env_vars=env_config.env_vars,
+ ansible_vars=env_config.ansible_vars,
+ callback_plugins=env_config.callback_plugins,
+ module_defaults=env_config.module_defaults,
+ ), indent=4, sort_keys=True), verbosity=3)
+
with integration_test_environment(args, target, inventory_path) as test_env:
cmd = ['./%s' % os.path.basename(target.script_path)]
@@ -1658,6 +1469,7 @@ def command_integration_script(args, target, test_dir, inventory_path, temp_path
cmd += ['-e', '@%s' % config_path]
module_coverage = 'non_local/' not in target.aliases
+
intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
remote_temp_path=remote_temp_path, module_coverage=module_coverage)
@@ -1694,11 +1506,20 @@ def command_integration_role(args, target, start_at_task, test_dir, inventory_pa
hosts = 'testhost'
gather_facts = True
+ if not isinstance(args, NetworkIntegrationConfig):
cloud_environment = get_cloud_environment(args, target)
if cloud_environment:
env_config = cloud_environment.get_environment_config()
+ if env_config:
+ display.info('>>> Environment Config\n%s' % json.dumps(dict(
+ env_vars=env_config.env_vars,
+ ansible_vars=env_config.ansible_vars,
+ callback_plugins=env_config.callback_plugins,
+ module_defaults=env_config.module_defaults,
+ ), indent=4, sort_keys=True), verbosity=3)
+
with integration_test_environment(args, target, inventory_path) as test_env:
if os.path.exists(test_env.vars_file):
vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir))
@@ -1758,6 +1579,9 @@ def command_integration_role(args, target, start_at_task, test_dir, inventory_pa
ANSIBLE_PLAYBOOK_DIR=cwd,
))
+ if env_config and env_config.env_vars:
+ env.update(env_config.env_vars)
+
env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir
module_coverage = 'non_local/' not in target.aliases
@@ -2278,17 +2102,15 @@ class NoTestsForChanges(ApplicationWarning):
class Delegate(Exception):
"""Trigger command delegation."""
- def __init__(self, exclude=None, require=None, integration_targets=None):
+ def __init__(self, exclude=None, require=None):
"""
:type exclude: list[str] | None
:type require: list[str] | None
- :type integration_targets: tuple[IntegrationTarget] | None
"""
super(Delegate, self).__init__()
self.exclude = exclude or []
self.require = require or []
- self.integration_targets = integration_targets or tuple()
class AllTargetsSkipped(ApplicationWarning):
diff --git a/test/lib/ansible_test/_internal/sanity/integration_aliases.py b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
index e21c093ae6..4bfd5ef8a6 100644
--- a/test/lib/ansible_test/_internal/sanity/integration_aliases.py
+++ b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
@@ -271,10 +271,17 @@ class IntegrationAliasesTest(SanityVersionNeutral):
)
for cloud in clouds:
+ if cloud == 'httptester':
+ find = self.format_test_group_alias('linux').replace('linux', 'posix')
+ find_incidental = ['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX]
+ else:
+ find = self.format_test_group_alias(cloud, 'generic')
+ find_incidental = ['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX]
+
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], include=True, directories=False, errors=False)),
- find=self.format_test_group_alias(cloud, 'cloud'),
- find_incidental=['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX],
+ find=find,
+ find_incidental=find_incidental,
)
return messages
diff --git a/test/lib/ansible_test/_internal/ssh.py b/test/lib/ansible_test/_internal/ssh.py
new file mode 100644
index 0000000000..6d16e78f3e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ssh.py
@@ -0,0 +1,264 @@
+"""High level functions for working with SSH."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import random
+import re
+import subprocess
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+ to_text,
+)
+
+from .util import (
+ ApplicationError,
+ cmd_quote,
+ common_environment,
+ devnull,
+ display,
+ exclude_none_values,
+ sanitize_host_name,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+
+class SshConnectionDetail:
+ """Information needed to establish an SSH connection to a host."""
+ def __init__(self,
+ name, # type: str
+ host, # type: str
+ port, # type: t.Optional[int]
+ user, # type: str
+ identity_file, # type: str
+ python_interpreter=None, # type: t.Optional[str]
+ shell_type=None, # type: t.Optional[str]
+ ): # type: (...) -> None
+ self.name = sanitize_host_name(name)
+ self.host = host
+ self.port = port
+ self.user = user
+ self.identity_file = identity_file
+ self.python_interpreter = python_interpreter
+ self.shell_type = shell_type
+
+
+class SshProcess:
+ """Wrapper around an SSH process."""
+ def __init__(self, process): # type: (t.Optional[subprocess.Popen]) -> None
+ self._process = process
+ self.pending_forwards = None # type: t.Optional[t.Set[t.Tuple[str, int]]]
+
+ self.forwards = {} # type: t.Dict[t.Tuple[str, int], int]
+
+ def terminate(self): # type: () -> None
+ """Terminate the SSH process."""
+ if not self._process:
+ return # explain mode
+
+ # noinspection PyBroadException
+ try:
+ self._process.terminate()
+ except Exception: # pylint: disable=broad-except
+ pass
+
+ def wait(self): # type: () -> None
+ """Wait for the SSH process to terminate."""
+ if not self._process:
+ return # explain mode
+
+ self._process.wait()
+
+ def collect_port_forwards(self): # type: (SshProcess) -> t.Dict[t.Tuple[str, int], int]
+ """Collect port assignments for dynamic SSH port forwards."""
+ errors = []
+
+ display.info('Collecting %d SSH port forward(s).' % len(self.pending_forwards), verbosity=2)
+
+ while self.pending_forwards:
+ if self._process:
+ line_bytes = self._process.stderr.readline()
+
+ if not line_bytes:
+ if errors:
+ details = ':\n%s' % '\n'.join(errors)
+ else:
+ details = '.'
+
+ raise ApplicationError('SSH port forwarding failed%s' % details)
+
+ line = to_text(line_bytes).strip()
+
+ match = re.search(r'^Allocated port (?P<src_port>[0-9]+) for remote forward to (?P<dst_host>[^:]+):(?P<dst_port>[0-9]+)$', line)
+
+ if not match:
+ if re.search(r'^Warning: Permanently added .* to the list of known hosts\.$', line):
+ continue
+
+ display.warning('Unexpected SSH port forwarding output: %s' % line, verbosity=2)
+
+ errors.append(line)
+ continue
+
+ src_port = int(match.group('src_port'))
+ dst_host = str(match.group('dst_host'))
+ dst_port = int(match.group('dst_port'))
+
+ dst = (dst_host, dst_port)
+ else:
+ # explain mode
+ dst = list(self.pending_forwards)[0]
+ src_port = random.randint(40000, 50000)
+
+ self.pending_forwards.remove(dst)
+ self.forwards[dst] = src_port
+
+ display.info('Collected %d SSH port forward(s):\n%s' % (
+ len(self.forwards), '\n'.join('%s -> %s:%s' % (src_port, dst[0], dst[1]) for dst, src_port in sorted(self.forwards.items()))), verbosity=2)
+
+ return self.forwards
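+
+ # Illustrative sketch: with LogLevel=INFO, OpenSSH reports each dynamic forward on
+ # stderr as, for example:
+ #
+ #   Allocated port 45678 for remote forward to localhost:80
+ #
+ # which the loop above records as self.forwards[('localhost', 80)] = 45678.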
+
+
+def create_ssh_command(
+ ssh, # type: SshConnectionDetail
+ options=None, # type: t.Optional[t.Dict[str, t.Union[str, int]]]
+ cli_args=None, # type: t.Optional[t.List[str]]
+ command=None, # type: t.Optional[str]
+): # type: (...) -> t.List[str]
+ """Create an SSH command using the specified options."""
+ cmd = [
+ 'ssh',
+ '-n', # prevent reading from stdin
+ '-i', ssh.identity_file, # file from which the identity for public key authentication is read
+ ]
+
+ if not command:
+ cmd.append('-N') # do not execute a remote command
+
+ if ssh.port:
+ cmd.extend(['-p', str(ssh.port)]) # port to connect to on the remote host
+
+ if ssh.user:
+ cmd.extend(['-l', ssh.user]) # user to log in as on the remote machine
+
+ ssh_options = dict(
+ BatchMode='yes',
+ ExitOnForwardFailure='yes',
+ LogLevel='ERROR',
+ ServerAliveCountMax=4,
+ ServerAliveInterval=15,
+ StrictHostKeyChecking='no',
+ UserKnownHostsFile='/dev/null',
+ )
+
+ ssh_options.update(options or {})
+
+ for key, value in sorted(ssh_options.items()):
+ cmd.extend(['-o', '='.join([key, str(value)])])
+
+ cmd.extend(cli_args or [])
+ cmd.append(ssh.host)
+
+ if command:
+ cmd.append(command)
+
+ return cmd
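+
+# Illustrative result (hypothetical connection details): for user 'root' on
+# example.com with port 22 and no remote command, the list resembles:
+#
+#   ['ssh', '-n', '-i', 'key.pem', '-N', '-p', '22', '-l', 'root',
+#    '-o', 'BatchMode=yes', ..., 'example.com']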
+
+
+def run_ssh_command(
+ args, # type: EnvironmentConfig
+ ssh, # type: SshConnectionDetail
+ options=None, # type: t.Optional[t.Dict[str, t.Union[str, int]]]
+ cli_args=None, # type: t.Optional[t.List[str]]
+ command=None, # type: t.Optional[str]
+): # type: (...) -> SshProcess
+ """Run the specified SSH command, returning the created SshProcess instance created."""
+ cmd = create_ssh_command(ssh, options, cli_args, command)
+ env = common_environment()
+
+ cmd_show = ' '.join([cmd_quote(c) for c in cmd])
+ display.info('Run background command: %s' % cmd_show, verbosity=1, truncate=True)
+
+ cmd_bytes = [to_bytes(c) for c in cmd]
+ env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
+
+ if args.explain:
+ process = SshProcess(None)
+ else:
+ process = SshProcess(subprocess.Popen(cmd_bytes, env=env_bytes, bufsize=-1, stdin=devnull(), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
+
+ return process
+
+
+def create_ssh_port_forwards(
+ args, # type: EnvironmentConfig
+ ssh, # type: SshConnectionDetail
+ forwards, # type: t.List[t.Tuple[str, int]]
+): # type: (...) -> SshProcess
+ """
+ Create SSH port forwards using the provided list of tuples (target_host, target_port).
+ Port bindings will be automatically assigned by SSH and must be collected with a subsequent call to collect_port_forwards.
+ """
+ options = dict(
+ LogLevel='INFO', # info level required to get messages on stderr indicating the ports assigned to each forward
+ )
+
+ cli_args = []
+
+ for forward_host, forward_port in forwards:
+ cli_args.extend(['-R', ':'.join([str(0), forward_host, str(forward_port)])])
+
+ process = run_ssh_command(args, ssh, options, cli_args)
+ process.pending_forwards = forwards
+
+ return process
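+
+# Usage sketch (hypothetical values): request dynamic forwards, then collect the
+# ports SSH assigned:
+#
+#   process = create_ssh_port_forwards(args, ssh, [('localhost', 80)])
+#   forwards = process.collect_port_forwards()  # e.g. {('localhost', 80): 45678}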
+
+
+def create_ssh_port_redirects(
+ args, # type: EnvironmentConfig
+ ssh, # type: SshConnectionDetail
+ redirects, # type: t.List[t.Tuple[int, str, int]]
+): # type: (...) -> SshProcess
+ """Create SSH port redirections using the provided list of tuples (bind_port, target_host, target_port)."""
+ options = {}
+ cli_args = []
+
+ for bind_port, target_host, target_port in redirects:
+ cli_args.extend(['-R', ':'.join([str(bind_port), target_host, str(target_port)])])
+
+ process = run_ssh_command(args, ssh, options, cli_args)
+
+ return process
+
+
+def generate_ssh_inventory(ssh_connections): # type: (t.List[SshConnectionDetail]) -> str
+ """Return an inventory file in JSON format, created from the provided SSH connection details."""
+ inventory = dict(
+ all=dict(
+ hosts=dict((ssh.name, exclude_none_values(dict(
+ ansible_host=ssh.host,
+ ansible_port=ssh.port,
+ ansible_user=ssh.user,
+ ansible_ssh_private_key_file=os.path.abspath(ssh.identity_file),
+ ansible_connection='ssh',
+ ansible_ssh_pipelining='yes',
+ ansible_python_interpreter=ssh.python_interpreter,
+ ansible_shell_type=ssh.shell_type,
+ ansible_ssh_extra_args='-o UserKnownHostsFile=/dev/null', # avoid changing the test environment
+ ansible_ssh_host_key_checking='no',
+ ))) for ssh in ssh_connections),
+ ),
+ )
+
+ inventory_text = json.dumps(inventory, indent=4, sort_keys=True)
+
+ display.info('>>> SSH Inventory\n%s' % inventory_text, verbosity=3)
+
+ return inventory_text
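+
+# Illustrative output (hypothetical connection detail), with None values excluded:
+#
+#   {"all": {"hosts": {"windows": {"ansible_connection": "ssh", "ansible_host": "10.0.0.5",
+#       "ansible_port": 22, "ansible_user": "administrator", ...}}}}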
diff --git a/test/lib/ansible_test/_internal/target.py b/test/lib/ansible_test/_internal/target.py
index ad6e5ad248..829b489323 100644
--- a/test/lib/ansible_test/_internal/target.py
+++ b/test/lib/ansible_test/_internal/target.py
@@ -614,6 +614,9 @@ class IntegrationTarget(CompletionTarget):
if 'destructive' not in groups:
groups.append('non_destructive')
+ if 'needs/httptester' in groups:
+ groups.append('cloud/httptester') # backwards compatibility for when it was not a cloud plugin
+
if '_' in self.name:
prefix = self.name[:self.name.find('_')]
else:
diff --git a/test/lib/ansible_test/_internal/util.py b/test/lib/ansible_test/_internal/util.py
index ebc14783f6..08208a97aa 100644
--- a/test/lib/ansible_test/_internal/util.py
+++ b/test/lib/ansible_test/_internal/util.py
@@ -72,6 +72,13 @@ try:
except AttributeError:
MAXFD = -1
+try:
+ TKey = t.TypeVar('TKey')
+ TValue = t.TypeVar('TValue')
+except AttributeError:
+ TKey = None # pylint: disable=invalid-name
+ TValue = None # pylint: disable=invalid-name
+
COVERAGE_CONFIG_NAME = 'coveragerc'
ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -148,6 +155,11 @@ def read_lines_without_comments(path, remove_blank_lines=False, optional=False):
return lines
+def exclude_none_values(data): # type: (t.Dict[TKey, t.Optional[TValue]]) -> t.Dict[TKey, TValue]
+ """Return the provided dictionary with any None values excluded."""
+ return dict((key, value) for key, value in data.items() if value is not None)
+
+
def find_executable(executable, cwd=None, path=None, required=True):
"""
:type executable: str
@@ -365,8 +377,6 @@ def common_environment():
)
optional = (
- 'HTTPTESTER',
- 'KRB5_PASSWORD',
'LD_LIBRARY_PATH',
'SSH_AUTH_SOCK',
# MacOS High Sierra Compatibility
@@ -725,18 +735,6 @@ def parse_to_list_of_dict(pattern, value):
return matched
-def get_available_port():
- """
- :rtype: int
- """
- # this relies on the kernel not reusing previously assigned ports immediately
- socket_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
- with contextlib.closing(socket_fd):
- socket_fd.bind(('', 0))
- return socket_fd.getsockname()[1]
-
-
def get_subclasses(class_type): # type: (t.Type[C]) -> t.Set[t.Type[C]]
"""Returns the set of types that are concrete subclasses of the given type."""
subclasses = set() # type: t.Set[t.Type[C]]
@@ -859,6 +857,21 @@ def open_zipfile(path, mode='r'):
zib_obj.close()
+def sanitize_host_name(name):
+ """Return a sanitized version of the given name, suitable for use as a hostname."""
+ return re.sub('[^A-Za-z0-9]+', '-', name)[:63].strip('-')
+
+
+def devnull():
+ """Return a file descriptor for /dev/null, using a previously cached version if available."""
+ try:
+ return devnull.fd
+ except AttributeError:
+ devnull.fd = os.open('/dev/null', os.O_RDONLY)
+
+ return devnull.fd
+
+
def get_hash(path):
"""
:type path: str
@@ -874,4 +887,20 @@ def get_hash(path):
return file_hash.hexdigest()
+def get_host_ip():
+ """Return the host's IP address."""
+ try:
+ return get_host_ip.ip
+ except AttributeError:
+ pass
+
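+ # connecting a UDP socket sends no traffic; it only selects the local address
+ # that would be used to reach the given destination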
+ with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
+ sock.connect(('10.255.255.255', 22))
+ host_ip = get_host_ip.ip = sock.getsockname()[0]
+
+ display.info('Detected host IP: %s' % host_ip, verbosity=1)
+
+ return host_ip
+
+
display = Display() # pylint: disable=locally-disabled, invalid-name
diff --git a/test/lib/ansible_test/_internal/util_common.py b/test/lib/ansible_test/_internal/util_common.py
index 40dc68b07a..370a8ca03c 100644
--- a/test/lib/ansible_test/_internal/util_common.py
+++ b/test/lib/ansible_test/_internal/util_common.py
@@ -219,7 +219,7 @@ def named_temporary_file(args, prefix, suffix, directory, content):
:rtype: str
"""
if args.explain:
- yield os.path.join(directory, '%stemp%s' % (prefix, suffix))
+ yield os.path.join(directory or '/tmp', '%stemp%s' % (prefix, suffix))
else:
with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd:
tempfile_fd.write(to_bytes(content))