author     Matt Martz <matt@sivel.net>    2020-11-06 08:41:41 -0600
committer  GitHub <noreply@github.com>    2020-11-06 08:41:41 -0600
commit     c8590c7482dcfc40f7054f629b7b6179f9e38daf (patch)
tree       057a2840fc035d49ac5f3909d39a691e34c59b08 /test/support
parent     599805e316573aeda946f53fc1375b32aa44357b (diff)
download   ansible-c8590c7482dcfc40f7054f629b7b6179f9e38daf.tar.gz
Various intentional tests (#72485)
* Add tests for argspec choices type=list
* Add explicit interpreter discovery tests to validate modules returning ansible_facts still set interp
* Add explicit tests for missing_required_lib
* Add explicit tests for recursive_diff
* ci_complete ci_coverage
* Update data to cover more code/tests
* ci_complete ci_coverage
* Add argspec tests for aliases, and no_log
* Forgotten file
* ci_complete ci_coverage
* Add argspec tests for type int
* ci_complete ci_coverage
* Remove incidental_k8s
* ci_complete ci_coverage
* fix missing newline
* Remove incidental_sts_assume_role
* ci_complete ci_coverage
Diffstat (limited to 'test/support')
-rw-r--r--  test/support/integration/plugins/module_utils/k8s/__init__.py          0
-rw-r--r--  test/support/integration/plugins/module_utils/k8s/common.py           290
-rw-r--r--  test/support/integration/plugins/module_utils/k8s/raw.py              519
-rw-r--r--  test/support/integration/plugins/modules/iam_role.py                  674
-rw-r--r--  test/support/integration/plugins/modules/k8s.py                       274
-rw-r--r--  test/support/integration/plugins/modules/k8s_info.py                  180
-rw-r--r--  test/support/integration/plugins/modules/python_requirements_info.py  176
-rw-r--r--  test/support/integration/plugins/modules/sts_assume_role.py           180
8 files changed, 0 insertions, 2293 deletions
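The commit message names two module_utils helpers that the deleted support code below also leans on: missing_required_lib for reporting an absent optional dependency, and recursive_diff for computing before/after dictionaries. As a point of reference only — this sketch is illustrative and is neither one of the removed files nor one of the new tests — a minimal module combining the two could look like this:

#!/usr/bin/python
# Illustrative sketch only: shows the missing_required_lib / recursive_diff
# pattern referred to in the commit message; module name and options are made up.
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import traceback

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.dict_transformations import recursive_diff

YAML_IMP_ERR = None
try:
    import yaml  # an optional dependency, guarded in the usual way
    HAS_YAML = True
except ImportError:
    YAML_IMP_ERR = traceback.format_exc()
    HAS_YAML = False


def main():
    module = AnsibleModule(argument_spec=dict(
        before=dict(type='dict', required=True),
        after=dict(type='dict', required=True),
    ))
    if not HAS_YAML:
        module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)

    # recursive_diff() returns None when the dicts match, otherwise a
    # (only_in_before, only_in_after) tuple -- the same contract diff_objects()
    # in the deleted common.py relies on.
    diff = recursive_diff(module.params['before'], module.params['after'])
    if diff:
        module.exit_json(changed=True, diff=dict(before=diff[0], after=diff[1]))
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()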
diff --git a/test/support/integration/plugins/module_utils/k8s/__init__.py b/test/support/integration/plugins/module_utils/k8s/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/support/integration/plugins/module_utils/k8s/__init__.py
+++ /dev/null
diff --git a/test/support/integration/plugins/module_utils/k8s/common.py b/test/support/integration/plugins/module_utils/k8s/common.py
deleted file mode 100644
index d86659f009..0000000000
--- a/test/support/integration/plugins/module_utils/k8s/common.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright 2018 Red Hat | Ansible
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-
-import copy
-import json
-import os
-import traceback
-
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.dict_transformations import recursive_diff
-from ansible.module_utils.six import iteritems, string_types
-from ansible.module_utils._text import to_native
-
-K8S_IMP_ERR = None
-try:
- import kubernetes
- import openshift
- from openshift.dynamic import DynamicClient
- from openshift.dynamic.exceptions import ResourceNotFoundError, ResourceNotUniqueError
- HAS_K8S_MODULE_HELPER = True
- k8s_import_exception = None
-except ImportError as e:
- HAS_K8S_MODULE_HELPER = False
- k8s_import_exception = e
- K8S_IMP_ERR = traceback.format_exc()
-
-YAML_IMP_ERR = None
-try:
- import yaml
- HAS_YAML = True
-except ImportError:
- YAML_IMP_ERR = traceback.format_exc()
- HAS_YAML = False
-
-try:
- import urllib3
- urllib3.disable_warnings()
-except ImportError:
- pass
-
-
-def list_dict_str(value):
- if isinstance(value, list):
- return value
- elif isinstance(value, dict):
- return value
- elif isinstance(value, string_types):
- return value
- raise TypeError
-
-
-ARG_ATTRIBUTES_BLACKLIST = ('property_path',)
-
-COMMON_ARG_SPEC = {
- 'state': {
- 'default': 'present',
- 'choices': ['present', 'absent'],
- },
- 'force': {
- 'type': 'bool',
- 'default': False,
- },
- 'resource_definition': {
- 'type': list_dict_str,
- 'aliases': ['definition', 'inline']
- },
- 'src': {
- 'type': 'path',
- },
- 'kind': {},
- 'name': {},
- 'namespace': {},
- 'api_version': {
- 'default': 'v1',
- 'aliases': ['api', 'version'],
- },
-}
-
-AUTH_ARG_SPEC = {
- 'kubeconfig': {
- 'type': 'path',
- },
- 'context': {},
- 'host': {},
- 'api_key': {
- 'no_log': True,
- },
- 'username': {},
- 'password': {
- 'no_log': True,
- },
- 'validate_certs': {
- 'type': 'bool',
- 'aliases': ['verify_ssl'],
- },
- 'ca_cert': {
- 'type': 'path',
- 'aliases': ['ssl_ca_cert'],
- },
- 'client_cert': {
- 'type': 'path',
- 'aliases': ['cert_file'],
- },
- 'client_key': {
- 'type': 'path',
- 'aliases': ['key_file'],
- },
- 'proxy': {},
- 'persist_config': {
- 'type': 'bool',
- },
-}
-
-# Map kubernetes-client parameters to ansible parameters
-AUTH_ARG_MAP = {
- 'kubeconfig': 'kubeconfig',
- 'context': 'context',
- 'host': 'host',
- 'api_key': 'api_key',
- 'username': 'username',
- 'password': 'password',
- 'verify_ssl': 'validate_certs',
- 'ssl_ca_cert': 'ca_cert',
- 'cert_file': 'client_cert',
- 'key_file': 'client_key',
- 'proxy': 'proxy',
- 'persist_config': 'persist_config',
-}
-
-
-class K8sAnsibleMixin(object):
- _argspec_cache = None
-
- @property
- def argspec(self):
- """
- Introspect the model properties, and return an Ansible module arg_spec dict.
- :return: dict
- """
- if self._argspec_cache:
- return self._argspec_cache
- argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
- argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
- self._argspec_cache = argument_spec
- return self._argspec_cache
-
- def get_api_client(self, **auth_params):
- auth_params = auth_params or getattr(self, 'params', {})
- auth = {}
-
- # If authorization variables aren't defined, look for them in environment variables
- for true_name, arg_name in AUTH_ARG_MAP.items():
- if auth_params.get(arg_name) is None:
- env_value = os.getenv('K8S_AUTH_{0}'.format(arg_name.upper()), None) or os.getenv('K8S_AUTH_{0}'.format(true_name.upper()), None)
- if env_value is not None:
- if AUTH_ARG_SPEC[arg_name].get('type') == 'bool':
- env_value = env_value.lower() not in ['0', 'false', 'no']
- auth[true_name] = env_value
- else:
- auth[true_name] = auth_params[arg_name]
-
- def auth_set(*names):
- return all([auth.get(name) for name in names])
-
- if auth_set('username', 'password', 'host') or auth_set('api_key', 'host'):
- # We have enough in the parameters to authenticate, no need to load incluster or kubeconfig
- pass
- elif auth_set('kubeconfig') or auth_set('context'):
- kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))
- else:
- # First try to do incluster config, then kubeconfig
- try:
- kubernetes.config.load_incluster_config()
- except kubernetes.config.ConfigException:
- kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))
-
- # Override any values in the default configuration with Ansible parameters
- configuration = kubernetes.client.Configuration()
- for key, value in iteritems(auth):
- if key in AUTH_ARG_MAP.keys() and value is not None:
- if key == 'api_key':
- setattr(configuration, key, {'authorization': "Bearer {0}".format(value)})
- else:
- setattr(configuration, key, value)
-
- kubernetes.client.Configuration.set_default(configuration)
- return DynamicClient(kubernetes.client.ApiClient(configuration))
-
- def find_resource(self, kind, api_version, fail=False):
- for attribute in ['kind', 'name', 'singular_name']:
- try:
- return self.client.resources.get(**{'api_version': api_version, attribute: kind})
- except (ResourceNotFoundError, ResourceNotUniqueError):
- pass
- try:
- return self.client.resources.get(api_version=api_version, short_names=[kind])
- except (ResourceNotFoundError, ResourceNotUniqueError):
- if fail:
- self.fail(msg='Failed to find exact match for {0}.{1} by [kind, name, singularName, shortNames]'.format(api_version, kind))
-
- def kubernetes_facts(self, kind, api_version, name=None, namespace=None, label_selectors=None, field_selectors=None):
- resource = self.find_resource(kind, api_version)
- if not resource:
- return dict(resources=[])
- try:
- result = resource.get(name=name,
- namespace=namespace,
- label_selector=','.join(label_selectors),
- field_selector=','.join(field_selectors)).to_dict()
- except openshift.dynamic.exceptions.NotFoundError:
- return dict(resources=[])
-
- if 'items' in result:
- return dict(resources=result['items'])
- else:
- return dict(resources=[result])
-
- def remove_aliases(self):
- """
- The helper doesn't know what to do with aliased keys
- """
- for k, v in iteritems(self.argspec):
- if 'aliases' in v:
- for alias in v['aliases']:
- if alias in self.params:
- self.params.pop(alias)
-
- def load_resource_definitions(self, src):
- """ Load the requested src path """
- result = None
- path = os.path.normpath(src)
- if not os.path.exists(path):
- self.fail(msg="Error accessing {0}. Does the file exist?".format(path))
- try:
- with open(path, 'r') as f:
- result = list(yaml.safe_load_all(f))
- except (IOError, yaml.YAMLError) as exc:
- self.fail(msg="Error loading resource_definition: {0}".format(exc))
- return result
-
- @staticmethod
- def diff_objects(existing, new):
- result = dict()
- diff = recursive_diff(existing, new)
- if diff:
- result['before'] = diff[0]
- result['after'] = diff[1]
- return not diff, result
-
-
-class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin):
- resource_definition = None
- api_version = None
- kind = None
-
- def __init__(self, *args, **kwargs):
-
- kwargs['argument_spec'] = self.argspec
- AnsibleModule.__init__(self, *args, **kwargs)
-
- if not HAS_K8S_MODULE_HELPER:
- self.fail_json(msg=missing_required_lib('openshift'), exception=K8S_IMP_ERR,
- error=to_native(k8s_import_exception))
- self.openshift_version = openshift.__version__
-
- if not HAS_YAML:
- self.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR)
-
- def execute_module(self):
- raise NotImplementedError()
-
- def fail(self, msg=None):
- self.fail_json(msg=msg)
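The deleted common.py resolves client authentication in get_api_client() by preferring module parameters and falling back to K8S_AUTH_* environment variables, coercing booleans on the way in. A stripped-down sketch of that lookup order, using a hypothetical two-entry subset of AUTH_ARG_MAP and no Kubernetes client:

# Sketch of the parameter -> environment fallback in the deleted
# K8sAnsibleMixin.get_api_client(); resolve_auth() and the reduced
# AUTH_ARG_MAP are illustrative, not symbols from the removed file.
import os

AUTH_ARG_MAP = {'host': 'host', 'verify_ssl': 'validate_certs'}
BOOL_ARGS = ('validate_certs',)


def resolve_auth(params):
    auth = {}
    for true_name, arg_name in AUTH_ARG_MAP.items():
        value = params.get(arg_name)
        if value is None:
            # K8S_AUTH_<ansible name> is checked first, then K8S_AUTH_<client name>
            value = (os.getenv('K8S_AUTH_{0}'.format(arg_name.upper()))
                     or os.getenv('K8S_AUTH_{0}'.format(true_name.upper())))
            if value is not None and arg_name in BOOL_ARGS:
                value = value.lower() not in ('0', 'false', 'no')
        if value is not None:
            auth[true_name] = value
    return auth


# Parameters win over the environment; unset values fall through to os.environ.
print(resolve_auth({'host': 'https://k8s.example.com', 'validate_certs': None}))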
diff --git a/test/support/integration/plugins/module_utils/k8s/raw.py b/test/support/integration/plugins/module_utils/k8s/raw.py
deleted file mode 100644
index 06272b8158..0000000000
--- a/test/support/integration/plugins/module_utils/k8s/raw.py
+++ /dev/null
@@ -1,519 +0,0 @@
-#
-# Copyright 2018 Red Hat | Ansible
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-
-import copy
-from datetime import datetime
-from distutils.version import LooseVersion
-import time
-import sys
-import traceback
-
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
-from ansible.module_utils.six import string_types
-from ansible.module_utils.k8s.common import KubernetesAnsibleModule
-from ansible.module_utils.common.dict_transformations import dict_merge
-
-
-try:
- import yaml
- from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ConflictError, ForbiddenError, KubernetesValidateMissing
- import urllib3
-except ImportError:
- # Exceptions handled in common
- pass
-
-try:
- import kubernetes_validate
- HAS_KUBERNETES_VALIDATE = True
-except ImportError:
- HAS_KUBERNETES_VALIDATE = False
-
-K8S_CONFIG_HASH_IMP_ERR = None
-try:
- from openshift.helper.hashes import generate_hash
- HAS_K8S_CONFIG_HASH = True
-except ImportError:
- K8S_CONFIG_HASH_IMP_ERR = traceback.format_exc()
- HAS_K8S_CONFIG_HASH = False
-
-HAS_K8S_APPLY = None
-try:
- from openshift.dynamic.apply import apply_object
- HAS_K8S_APPLY = True
-except ImportError:
- HAS_K8S_APPLY = False
-
-
-class KubernetesRawModule(KubernetesAnsibleModule):
-
- @property
- def validate_spec(self):
- return dict(
- fail_on_error=dict(type='bool'),
- version=dict(),
- strict=dict(type='bool', default=True)
- )
-
- @property
- def condition_spec(self):
- return dict(
- type=dict(),
- status=dict(default=True, choices=[True, False, "Unknown"]),
- reason=dict()
- )
-
- @property
- def argspec(self):
- argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
- argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
- argument_spec['merge_type'] = dict(type='list', choices=['json', 'merge', 'strategic-merge'])
- argument_spec['wait'] = dict(type='bool', default=False)
- argument_spec['wait_sleep'] = dict(type='int', default=5)
- argument_spec['wait_timeout'] = dict(type='int', default=120)
- argument_spec['wait_condition'] = dict(type='dict', default=None, options=self.condition_spec)
- argument_spec['validate'] = dict(type='dict', default=None, options=self.validate_spec)
- argument_spec['append_hash'] = dict(type='bool', default=False)
- argument_spec['apply'] = dict(type='bool', default=False)
- return argument_spec
-
- def __init__(self, k8s_kind=None, *args, **kwargs):
- self.client = None
- self.warnings = []
-
- mutually_exclusive = [
- ('resource_definition', 'src'),
- ('merge_type', 'apply'),
- ]
-
- KubernetesAnsibleModule.__init__(self, *args,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True,
- **kwargs)
- self.kind = k8s_kind or self.params.get('kind')
- self.api_version = self.params.get('api_version')
- self.name = self.params.get('name')
- self.namespace = self.params.get('namespace')
- resource_definition = self.params.get('resource_definition')
- validate = self.params.get('validate')
- if validate:
- if LooseVersion(self.openshift_version) < LooseVersion("0.8.0"):
- self.fail_json(msg="openshift >= 0.8.0 is required for validate")
- self.append_hash = self.params.get('append_hash')
- if self.append_hash:
- if not HAS_K8S_CONFIG_HASH:
- self.fail_json(msg=missing_required_lib("openshift >= 0.7.2", reason="for append_hash"),
- exception=K8S_CONFIG_HASH_IMP_ERR)
- if self.params['merge_type']:
- if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
- self.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type"))
- self.apply = self.params.get('apply', False)
- if self.apply:
- if not HAS_K8S_APPLY:
- self.fail_json(msg=missing_required_lib("openshift >= 0.9.2", reason="for apply"))
-
- if resource_definition:
- if isinstance(resource_definition, string_types):
- try:
- self.resource_definitions = yaml.safe_load_all(resource_definition)
- except (IOError, yaml.YAMLError) as exc:
- self.fail(msg="Error loading resource_definition: {0}".format(exc))
- elif isinstance(resource_definition, list):
- self.resource_definitions = resource_definition
- else:
- self.resource_definitions = [resource_definition]
- src = self.params.get('src')
- if src:
- self.resource_definitions = self.load_resource_definitions(src)
- try:
- self.resource_definitions = [item for item in self.resource_definitions if item]
- except AttributeError:
- pass
-
- if not resource_definition and not src:
- implicit_definition = dict(
- kind=self.kind,
- apiVersion=self.api_version,
- metadata=dict(name=self.name)
- )
- if self.namespace:
- implicit_definition['metadata']['namespace'] = self.namespace
- self.resource_definitions = [implicit_definition]
-
- def flatten_list_kind(self, list_resource, definitions):
- flattened = []
- parent_api_version = list_resource.group_version if list_resource else None
- parent_kind = list_resource.kind[:-4] if list_resource else None
- for definition in definitions.get('items', []):
- resource = self.find_resource(definition.get('kind', parent_kind), definition.get('apiVersion', parent_api_version), fail=True)
- flattened.append((resource, self.set_defaults(resource, definition)))
- return flattened
-
- def execute_module(self):
- changed = False
- results = []
- try:
- self.client = self.get_api_client()
- # Hopefully the kubernetes client will provide its own exception class one day
- except (urllib3.exceptions.RequestError) as e:
- self.fail_json(msg="Couldn't connect to Kubernetes: %s" % str(e))
-
- flattened_definitions = []
- for definition in self.resource_definitions:
- kind = definition.get('kind', self.kind)
- api_version = definition.get('apiVersion', self.api_version)
- if kind.endswith('List'):
- resource = self.find_resource(kind, api_version, fail=False)
- flattened_definitions.extend(self.flatten_list_kind(resource, definition))
- else:
- resource = self.find_resource(kind, api_version, fail=True)
- flattened_definitions.append((resource, definition))
-
- for (resource, definition) in flattened_definitions:
- kind = definition.get('kind', self.kind)
- api_version = definition.get('apiVersion', self.api_version)
- definition = self.set_defaults(resource, definition)
- self.warnings = []
- if self.params['validate'] is not None:
- self.warnings = self.validate(definition)
- result = self.perform_action(resource, definition)
- result['warnings'] = self.warnings
- changed = changed or result['changed']
- results.append(result)
-
- if len(results) == 1:
- self.exit_json(**results[0])
-
- self.exit_json(**{
- 'changed': changed,
- 'result': {
- 'results': results
- }
- })
-
- def validate(self, resource):
- def _prepend_resource_info(resource, msg):
- return "%s %s: %s" % (resource['kind'], resource['metadata']['name'], msg)
-
- try:
- warnings, errors = self.client.validate(resource, self.params['validate'].get('version'), self.params['validate'].get('strict'))
- except KubernetesValidateMissing:
- self.fail_json(msg="kubernetes-validate python library is required to validate resources")
-
- if errors and self.params['validate']['fail_on_error']:
- self.fail_json(msg="\n".join([_prepend_resource_info(resource, error) for error in errors]))
- else:
- return [_prepend_resource_info(resource, msg) for msg in warnings + errors]
-
- def set_defaults(self, resource, definition):
- definition['kind'] = resource.kind
- definition['apiVersion'] = resource.group_version
- metadata = definition.get('metadata', {})
- if self.name and not metadata.get('name'):
- metadata['name'] = self.name
- if resource.namespaced and self.namespace and not metadata.get('namespace'):
- metadata['namespace'] = self.namespace
- definition['metadata'] = metadata
- return definition
-
- def perform_action(self, resource, definition):
- result = {'changed': False, 'result': {}}
- state = self.params.get('state', None)
- force = self.params.get('force', False)
- name = definition['metadata'].get('name')
- namespace = definition['metadata'].get('namespace')
- existing = None
- wait = self.params.get('wait')
- wait_sleep = self.params.get('wait_sleep')
- wait_timeout = self.params.get('wait_timeout')
- wait_condition = None
- if self.params.get('wait_condition') and self.params['wait_condition'].get('type'):
- wait_condition = self.params['wait_condition']
-
- self.remove_aliases()
-
- try:
- # ignore append_hash for resources other than ConfigMap and Secret
- if self.append_hash and definition['kind'] in ['ConfigMap', 'Secret']:
- name = '%s-%s' % (name, generate_hash(definition))
- definition['metadata']['name'] = name
- params = dict(name=name)
- if namespace:
- params['namespace'] = namespace
- existing = resource.get(**params)
- except NotFoundError:
- # Remove traceback so that it doesn't show up in later failures
- try:
- sys.exc_clear()
- except AttributeError:
- # no sys.exc_clear on python3
- pass
- except ForbiddenError as exc:
- if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent':
- return self.create_project_request(definition)
- self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
- error=exc.status, status=exc.status, reason=exc.reason)
- except DynamicApiError as exc:
- self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
- error=exc.status, status=exc.status, reason=exc.reason)
-
- if state == 'absent':
- result['method'] = "delete"
- if not existing:
- # The object already does not exist
- return result
- else:
- # Delete the object
- result['changed'] = True
- if not self.check_mode:
- try:
- k8s_obj = resource.delete(**params)
- result['result'] = k8s_obj.to_dict()
- except DynamicApiError as exc:
- self.fail_json(msg="Failed to delete object: {0}".format(exc.body),
- error=exc.status, status=exc.status, reason=exc.reason)
- if wait:
- success, resource, duration = self.wait(resource, definition, wait_sleep, wait_timeout, 'absent')
- result['duration'] = duration
- if not success:
- self.fail_json(msg="Resource deletion timed out", **result)
- return result
- else:
- if self.apply:
- if self.check_mode:
- ignored, k8s_obj = apply_object(resource, definition)
- else:
- try:
- k8s_obj = resource.apply(definition, namespace=namespace).to_dict()
- except DynamicApiError as exc:
- msg = "Failed to apply object: {0}".format(exc.body)
- if self.warnings:
- msg += "\n" + "\n ".join(self.warnings)
- self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
- success = True
- result['result'] = k8s_obj
- if wait:
- success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
- if existing:
- existing = existing.to_dict()
- else:
- existing = {}
- match, diffs = self.diff_objects(existing, result['result'])
- result['changed'] = not match
- result['diff'] = diffs
- result['method'] = 'apply'
- if not success:
- self.fail_json(msg="Resource apply timed out", **result)
- return result
-
- if not existing:
- if self.check_mode:
- k8s_obj = definition
- else:
- try:
- k8s_obj = resource.create(definition, namespace=namespace).to_dict()
- except ConflictError:
- # Some resources, like ProjectRequests, can't be created multiple times,
- # because the resources that they create don't match their kind
- # In this case we'll mark it as unchanged and warn the user
- self.warn("{0} was not found, but creating it returned a 409 Conflict error. This can happen \
- if the resource you are creating does not directly create a resource of the same kind.".format(name))
- return result
- except DynamicApiError as exc:
- msg = "Failed to create object: {0}".format(exc.body)
- if self.warnings:
- msg += "\n" + "\n ".join(self.warnings)
- self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
- success = True
- result['result'] = k8s_obj
- if wait and not self.check_mode:
- success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
- result['changed'] = True
- result['method'] = 'create'
- if not success:
- self.fail_json(msg="Resource creation timed out", **result)
- return result
-
- match = False
- diffs = []
-
- if existing and force:
- if self.check_mode:
- k8s_obj = definition
- else:
- try:
- k8s_obj = resource.replace(definition, name=name, namespace=namespace, append_hash=self.append_hash).to_dict()
- except DynamicApiError as exc:
- msg = "Failed to replace object: {0}".format(exc.body)
- if self.warnings:
- msg += "\n" + "\n ".join(self.warnings)
- self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
- match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
- success = True
- result['result'] = k8s_obj
- if wait:
- success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
- match, diffs = self.diff_objects(existing.to_dict(), result['result'])
- result['changed'] = not match
- result['method'] = 'replace'
- result['diff'] = diffs
- if not success:
- self.fail_json(msg="Resource replacement timed out", **result)
- return result
-
- # Differences exist between the existing obj and requested params
- if self.check_mode:
- k8s_obj = dict_merge(existing.to_dict(), definition)
- else:
- if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
- k8s_obj, error = self.patch_resource(resource, definition, existing, name,
- namespace)
- else:
- for merge_type in self.params['merge_type'] or ['strategic-merge', 'merge']:
- k8s_obj, error = self.patch_resource(resource, definition, existing, name,
- namespace, merge_type=merge_type)
- if not error:
- break
- if error:
- self.fail_json(**error)
-
- success = True
- result['result'] = k8s_obj
- if wait:
- success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
- match, diffs = self.diff_objects(existing.to_dict(), result['result'])
- result['changed'] = not match
- result['method'] = 'patch'
- result['diff'] = diffs
-
- if not success:
- self.fail_json(msg="Resource update timed out", **result)
- return result
-
- def patch_resource(self, resource, definition, existing, name, namespace, merge_type=None):
- try:
- params = dict(name=name, namespace=namespace)
- if merge_type:
- params['content_type'] = 'application/{0}-patch+json'.format(merge_type)
- k8s_obj = resource.patch(definition, **params).to_dict()
- match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
- error = {}
- return k8s_obj, {}
- except DynamicApiError as exc:
- msg = "Failed to patch object: {0}".format(exc.body)
- if self.warnings:
- msg += "\n" + "\n ".join(self.warnings)
- error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings)
- return None, error
-
- def create_project_request(self, definition):
- definition['kind'] = 'ProjectRequest'
- result = {'changed': False, 'result': {}}
- resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True)
- if not self.check_mode:
- try:
- k8s_obj = resource.create(definition)
- result['result'] = k8s_obj.to_dict()
- except DynamicApiError as exc:
- self.fail_json(msg="Failed to create object: {0}".format(exc.body),
- error=exc.status, status=exc.status, reason=exc.reason)
- result['changed'] = True
- result['method'] = 'create'
- return result
-
- def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state):
- start = datetime.now()
-
- def _wait_for_elapsed():
- return (datetime.now() - start).seconds
-
- response = None
- while _wait_for_elapsed() < timeout:
- try:
- response = resource.get(name=name, namespace=namespace)
- if predicate(response):
- if response:
- return True, response.to_dict(), _wait_for_elapsed()
- else:
- return True, {}, _wait_for_elapsed()
- time.sleep(sleep)
- except NotFoundError:
- if state == 'absent':
- return True, {}, _wait_for_elapsed()
- if response:
- response = response.to_dict()
- return False, response, _wait_for_elapsed()
-
- def wait(self, resource, definition, sleep, timeout, state='present', condition=None):
-
- def _deployment_ready(deployment):
- # FIXME: frustratingly bool(deployment.status) is True even if status is empty
- # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
- return (deployment.status and deployment.status.replicas is not None and
- deployment.status.availableReplicas == deployment.status.replicas and
- deployment.status.observedGeneration == deployment.metadata.generation)
-
- def _pod_ready(pod):
- return (pod.status and pod.status.containerStatuses is not None and
- all([container.ready for container in pod.status.containerStatuses]))
-
- def _daemonset_ready(daemonset):
- return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and
- daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and
- daemonset.status.observedGeneration == daemonset.metadata.generation)
-
- def _custom_condition(resource):
- if not resource.status or not resource.status.conditions:
- return False
- match = [x for x in resource.status.conditions if x.type == condition['type']]
- if not match:
- return False
- # There should never be more than one condition of a specific type
- match = match[0]
- if match.status == 'Unknown':
- if match.status == condition['status']:
- if 'reason' not in condition:
- return True
- if condition['reason']:
- return match.reason == condition['reason']
- return False
- status = True if match.status == 'True' else False
- if status == condition['status']:
- if condition.get('reason'):
- return match.reason == condition['reason']
- return True
- return False
-
- def _resource_absent(resource):
- return not resource
-
- waiter = dict(
- Deployment=_deployment_ready,
- DaemonSet=_daemonset_ready,
- Pod=_pod_ready
- )
- kind = definition['kind']
- if state == 'present' and not condition:
- predicate = waiter.get(kind, lambda x: x)
- elif state == 'present' and condition:
- predicate = _custom_condition
- else:
- predicate = _resource_absent
- return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state)
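The wait() logic that closes the deleted raw.py pairs a per-kind readiness predicate with a generic poll loop (_wait_for). Detached from the Kubernetes client, the shape of that loop is roughly the following; wait_for(), fetch() and ready() are stand-in names, not symbols from the removed module:

# Poll-until-predicate-or-timeout in the shape of KubernetesRawModule._wait_for();
# fetch() stands in for resource.get() and ready() for the per-kind predicates.
import time
from datetime import datetime


def wait_for(fetch, ready, sleep=5, timeout=120):
    start = datetime.now()

    def elapsed():
        return (datetime.now() - start).seconds

    response = None
    while elapsed() < timeout:
        response = fetch()
        if ready(response):
            return True, response, elapsed()
        time.sleep(sleep)
    return False, response, elapsed()


# Example with a predicate in the spirit of _deployment_ready()
status = {'replicas': 3, 'availableReplicas': 3}
ok, result, duration = wait_for(
    lambda: status,
    lambda s: s.get('replicas') is not None and s.get('availableReplicas') == s.get('replicas'),
    sleep=1, timeout=5)
print(ok, duration)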
diff --git a/test/support/integration/plugins/modules/iam_role.py b/test/support/integration/plugins/modules/iam_role.py
deleted file mode 100644
index bd666f249c..0000000000
--- a/test/support/integration/plugins/modules/iam_role.py
+++ /dev/null
@@ -1,674 +0,0 @@
-#!/usr/bin/python
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: iam_role
-short_description: Manage AWS IAM roles
-description:
- - Manage AWS IAM roles.
-version_added: "2.3"
-author: "Rob White (@wimnat)"
-options:
- path:
- description:
- - The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
- default: "/"
- type: str
- name:
- description:
- - The name of the role to create.
- required: true
- type: str
- description:
- description:
- - Provides a description of the role.
- version_added: "2.5"
- type: str
- boundary:
- description:
- - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
- - Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false).
- - This is intended for roles/users that have permissions to create new IAM objects.
- - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
- - Requires botocore 1.10.57 or above.
- aliases: [boundary_policy_arn]
- version_added: "2.7"
- type: str
- assume_role_policy_document:
- description:
- - The trust relationship policy document that grants an entity permission to assume the role.
- - This parameter is required when I(state=present).
- type: json
- managed_policies:
- description:
- - A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names.
- - To remove all policies set I(purge_polices=true) and I(managed_policies=[None]).
- - To embed an inline policy, use M(iam_policy).
- aliases: ['managed_policy']
- type: list
- max_session_duration:
- description:
- - The maximum duration (in seconds) of a session when assuming the role.
- - Valid values are between 1 and 12 hours (3600 and 43200 seconds).
- version_added: "2.10"
- type: int
- purge_policies:
- description:
- - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detatched.
- - By default I(purge_policies=true). In Ansible 2.14 this will be changed to I(purge_policies=false).
- version_added: "2.5"
- type: bool
- aliases: ['purge_policy', 'purge_managed_policies']
- state:
- description:
- - Create or remove the IAM role.
- default: present
- choices: [ present, absent ]
- type: str
- create_instance_profile:
- description:
- - Creates an IAM instance profile along with the role.
- default: true
- version_added: "2.5"
- type: bool
- delete_instance_profile:
- description:
- - When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance
- profile created with the same I(name) as the role.
- - Only applies when I(state=absent).
- default: false
- version_added: "2.10"
- type: bool
- tags:
- description:
- - Tag dict to apply to the queue.
- - Requires botocore 1.12.46 or above.
- version_added: "2.10"
- type: dict
- purge_tags:
- description:
- - Remove tags not listed in I(tags) when tags is specified.
- default: true
- version_added: "2.10"
- type: bool
-requirements: [ botocore, boto3 ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create a role with description and tags
- iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- description: This is My New Role
- tags:
- env: dev
-
-- name: "Create a role and attach a managed policy called 'PowerUserAccess'"
- iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- managed_policies:
- - arn:aws:iam::aws:policy/PowerUserAccess
-
-- name: Keep the role created above but remove all managed policies
- iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- managed_policies: []
-
-- name: Delete the role
- iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"
- state: absent
-
-'''
-RETURN = '''
-iam_role:
- description: dictionary containing the IAM Role data
- returned: success
- type: complex
- contains:
- path:
- description: the path to the role
- type: str
- returned: always
- sample: /
- role_name:
- description: the friendly name that identifies the role
- type: str
- returned: always
- sample: myrole
- role_id:
- description: the stable and unique string identifying the role
- type: str
- returned: always
- sample: ABCDEFF4EZ4ABCDEFV4ZC
- arn:
- description: the Amazon Resource Name (ARN) specifying the role
- type: str
- returned: always
- sample: "arn:aws:iam::1234567890:role/mynewrole"
- create_date:
- description: the date and time, in ISO 8601 date-time format, when the role was created
- type: str
- returned: always
- sample: "2016-08-14T04:36:28+00:00"
- assume_role_policy_document:
- description: the policy that grants an entity permission to assume the role
- type: str
- returned: always
- sample: {
- 'statement': [
- {
- 'action': 'sts:AssumeRole',
- 'effect': 'Allow',
- 'principal': {
- 'service': 'ec2.amazonaws.com'
- },
- 'sid': ''
- }
- ],
- 'version': '2012-10-17'
- }
- attached_policies:
- description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
- type: list
- returned: always
- sample: [
- {
- 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
- 'policy_name': 'PowerUserAccess'
- }
- ]
- tags:
- description: role tags
- type: dict
- returned: always
- sample: '{"Env": "Prod"}'
-'''
-
-import json
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
-from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_aws_tags
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc):
- if not compare_policies(current_policy_doc, json.loads(new_policy_doc)):
- return True
- else:
- return False
-
-
-@AWSRetry.jittered_backoff()
-def _list_policies(connection):
- paginator = connection.get_paginator('list_policies')
- return paginator.paginate().build_full_result()['Policies']
-
-
-def convert_friendly_names_to_arns(connection, module, policy_names):
- if not any([not policy.startswith('arn:') for policy in policy_names]):
- return policy_names
- allpolicies = {}
- policies = _list_policies(connection)
-
- for policy in policies:
- allpolicies[policy['PolicyName']] = policy['Arn']
- allpolicies[policy['Arn']] = policy['Arn']
- try:
- return [allpolicies[policy] for policy in policy_names]
- except KeyError as e:
- module.fail_json_aws(e, msg="Couldn't find policy")
-
-
-def attach_policies(connection, module, policies_to_attach, params):
- changed = False
- for policy_arn in policies_to_attach:
- try:
- if not module.check_mode:
- connection.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, params['RoleName']))
- changed = True
- return changed
-
-
-def remove_policies(connection, module, policies_to_remove, params):
- changed = False
- for policy in policies_to_remove:
- try:
- if not module.check_mode:
- connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, params['RoleName']))
- changed = True
- return changed
-
-
-def generate_create_params(module):
- params = dict()
- params['Path'] = module.params.get('path')
- params['RoleName'] = module.params.get('name')
- params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document')
- if module.params.get('description') is not None:
- params['Description'] = module.params.get('description')
- if module.params.get('max_session_duration') is not None:
- params['MaxSessionDuration'] = module.params.get('max_session_duration')
- if module.params.get('boundary') is not None:
- params['PermissionsBoundary'] = module.params.get('boundary')
- if module.params.get('tags') is not None:
- params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags'))
-
- return params
-
-
-def create_basic_role(connection, module, params):
- """
- Perform the Role creation.
- Assumes tests for the role existing have already been performed.
- """
-
- try:
- if not module.check_mode:
- role = connection.create_role(aws_retry=True, **params)
- # 'Description' is documented as key of the role returned by create_role
- # but appears to be an AWS bug (the value is not returned using the AWS CLI either).
- # Get the role after creating it.
- role = get_role_with_backoff(connection, module, params['RoleName'])
- else:
- role = {'MadeInCheckMode': True}
- role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to create role")
-
- return role
-
-
-def update_role_assumed_policy(connection, module, params, role):
- # Check Assumed Policy document
- if compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']):
- return False
-
- if module.check_mode:
- return True
-
- try:
- connection.update_assume_role_policy(
- RoleName=params['RoleName'],
- PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])),
- aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(params['RoleName']))
- return True
-
-
-def update_role_description(connection, module, params, role):
- # Check Description update
- if params.get('Description') is None:
- return False
- if role.get('Description') == params['Description']:
- return False
-
- if module.check_mode:
- return True
-
- try:
- connection.update_role_description(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName']))
- return True
-
-
-def update_role_max_session_duration(connection, module, params, role):
- # Check MaxSessionDuration update
- if params.get('MaxSessionDuration') is None:
- return False
- if role.get('MaxSessionDuration') == params['MaxSessionDuration']:
- return False
-
- if module.check_mode:
- return True
-
- try:
- connection.update_role(RoleName=params['RoleName'], MaxSessionDuration=params['MaxSessionDuration'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName']))
- return True
-
-
-def update_role_permissions_boundary(connection, module, params, role):
- # Check PermissionsBoundary
- if params.get('PermissionsBoundary') is None:
- return False
- if params.get('PermissionsBoundary') == role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', ''):
- return False
-
- if module.check_mode:
- return True
-
- if params.get('PermissionsBoundary') == '':
- try:
- connection.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(params['RoleName']))
- else:
- try:
- connection.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=params['PermissionsBoundary'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName']))
- return True
-
-
-def update_managed_policies(connection, module, params, role, managed_policies, purge_policies):
- # Check Managed Policies
- if managed_policies is None:
- return False
-
- # If we're manipulating a fake role
- if role.get('MadeInCheckMode', False):
- role['AttachedPolicies'] = list(map(lambda x: {'PolicyArn': x, 'PolicyName': x.split(':')[5]}, managed_policies))
- return True
-
- # Get list of current attached managed policies
- current_attached_policies = get_attached_policy_list(connection, module, params['RoleName'])
- current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies]
-
- if len(managed_policies) == 1 and managed_policies[0] is None:
- managed_policies = []
-
- policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies)
- policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list)
-
- changed = False
-
- if purge_policies:
- changed |= remove_policies(connection, module, policies_to_remove, params)
-
- changed |= attach_policies(connection, module, policies_to_attach, params)
-
- return changed
-
-
-def create_or_update_role(connection, module):
-
- params = generate_create_params(module)
- role_name = params['RoleName']
- create_instance_profile = module.params.get('create_instance_profile')
- purge_policies = module.params.get('purge_policies')
- if purge_policies is None:
- purge_policies = True
- managed_policies = module.params.get('managed_policies')
- if managed_policies:
- # Attempt to list the policies early so we don't leave things behind if we can't find them.
- managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
-
- changed = False
-
- # Get role
- role = get_role(connection, module, role_name)
-
- # If role is None, create it
- if role is None:
- role = create_basic_role(connection, module, params)
- changed = True
- else:
- changed |= update_role_tags(connection, module, params, role)
- changed |= update_role_assumed_policy(connection, module, params, role)
- changed |= update_role_description(connection, module, params, role)
- changed |= update_role_max_session_duration(connection, module, params, role)
- changed |= update_role_permissions_boundary(connection, module, params, role)
-
- if create_instance_profile:
- changed |= create_instance_profiles(connection, module, params, role)
-
- changed |= update_managed_policies(connection, module, params, role, managed_policies, purge_policies)
-
- # Get the role again
- if not role.get('MadeInCheckMode', False):
- role = get_role(connection, module, params['RoleName'])
- role['AttachedPolicies'] = get_attached_policy_list(connection, module, params['RoleName'])
- role['tags'] = get_role_tags(connection, module)
-
- module.exit_json(
- changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags']),
- **camel_dict_to_snake_dict(role, ignore_list=['tags']))
-
-
-def create_instance_profiles(connection, module, params, role):
-
- if role.get('MadeInCheckMode', False):
- return False
-
- # Fetch existing Profiles
- try:
- instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName']))
-
- # Profile already exists
- if any(p['InstanceProfileName'] == params['RoleName'] for p in instance_profiles):
- return False
-
- if module.check_mode:
- return True
-
- # Make sure an instance profile is created
- try:
- connection.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True)
- except ClientError as e:
- # If the profile already exists, no problem, move on.
- # Implies someone's changing things at the same time...
- if e.response['Error']['Code'] == 'EntityAlreadyExists':
- return False
- else:
- module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))
- except BotoCoreError as e:
- module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))
-
- # And attach the role to the profile
- try:
- connection.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(params['RoleName']))
-
- return True
-
-
-def remove_instance_profiles(connection, module, role_params, role):
- role_name = module.params.get('name')
- delete_profiles = module.params.get("delete_instance_profile")
-
- try:
- instance_profiles = connection.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))
-
- # Remove the role from the instance profile(s)
- for profile in instance_profiles:
- profile_name = profile['InstanceProfileName']
- try:
- if not module.check_mode:
- connection.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params)
- if profile_name == role_name:
- if delete_profiles:
- try:
- connection.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name))
-
-
-def destroy_role(connection, module):
-
- role_name = module.params.get('name')
- role = get_role(connection, module, role_name)
- role_params = dict()
- role_params['RoleName'] = role_name
- boundary_params = dict(role_params)
- boundary_params['PermissionsBoundary'] = ''
-
- if role is None:
- module.exit_json(changed=False)
-
- # Before we try to delete the role we need to remove any
- # - attached instance profiles
- # - attached managed policies
- # - permissions boundary
- remove_instance_profiles(connection, module, role_params, role)
- update_managed_policies(connection, module, role_params, role, [], True)
- update_role_permissions_boundary(connection, module, boundary_params, role)
-
- try:
- if not module.check_mode:
- connection.delete_role(aws_retry=True, **role_params)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to delete role")
-
- module.exit_json(changed=True)
-
-
-def get_role_with_backoff(connection, module, name):
- try:
- return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)(RoleName=name)['Role']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
-
-
-def get_role(connection, module, name):
- try:
- return connection.get_role(RoleName=name, aws_retry=True)['Role']
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchEntity':
- return None
- else:
- module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
- except BotoCoreError as e:
- module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
-
-
-def get_attached_policy_list(connection, module, name):
- try:
- return connection.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies']
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
-
-
-def get_role_tags(connection, module):
- role_name = module.params.get('name')
- if not hasattr(connection, 'list_role_tags'):
- return {}
- try:
- return boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name))
-
-
-def update_role_tags(connection, module, params, role):
- new_tags = params.get('Tags')
- if new_tags is None:
- return False
- new_tags = boto3_tag_list_to_ansible_dict(new_tags)
-
- role_name = module.params.get('name')
- purge_tags = module.params.get('purge_tags')
-
- try:
- existing_tags = boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
- except (ClientError, KeyError):
- existing_tags = {}
-
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
-
- if not module.check_mode:
- try:
- if tags_to_remove:
- connection.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True)
- if tags_to_add:
- connection.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name)
-
- changed = bool(tags_to_add) or bool(tags_to_remove)
- return changed
-
-
-def main():
-
- argument_spec = dict(
- name=dict(type='str', required=True),
- path=dict(type='str', default="/"),
- assume_role_policy_document=dict(type='json'),
- managed_policies=dict(type='list', aliases=['managed_policy']),
- max_session_duration=dict(type='int'),
- state=dict(type='str', choices=['present', 'absent'], default='present'),
- description=dict(type='str'),
- boundary=dict(type='str', aliases=['boundary_policy_arn']),
- create_instance_profile=dict(type='bool', default=True),
- delete_instance_profile=dict(type='bool', default=False),
- purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']),
- tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=True),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[('state', 'present', ['assume_role_policy_document'])],
- supports_check_mode=True)
-
- if module.params.get('purge_policies') is None:
- module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.'
- ' To maintain the existing behaviour explicity set purge_policies=true',
- version='2.14', collection_name='ansible.builtin')
-
- if module.params.get('boundary'):
- if module.params.get('create_instance_profile'):
- module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
- if not module.params.get('boundary').startswith('arn:aws:iam'):
- module.fail_json(msg="Boundary policy must be an ARN")
- if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'):
- module.fail_json(msg="When managing tags botocore must be at least v1.12.46. "
- "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
- if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'):
- module.fail_json(msg="When using a boundary policy, botocore must be at least v1.10.57. "
- "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
- if module.params.get('max_session_duration'):
- max_session_duration = module.params.get('max_session_duration')
- if max_session_duration < 3600 or max_session_duration > 43200:
- module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
- if module.params.get('path'):
- path = module.params.get('path')
- if not path.endswith('/') or not path.startswith('/'):
- module.fail_json(msg="path must begin and end with /")
-
- connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
-
- state = module.params.get("state")
-
- if state == 'present':
- create_or_update_role(connection, module)
- else:
- destroy_role(connection, module)
-
-
-if __name__ == '__main__':
- main()
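update_managed_policies() in the deleted iam_role.py reconciles attached policies with set arithmetic: ARNs attached but not requested are detached (when purging), and ARNs requested but not attached are attached. Reduced to plain Python, with the AWS calls replaced by a returned action list; reconcile_policies() is an illustrative name, not a function from the removed module:

# Set-difference reconciliation in the shape of update_managed_policies();
# attach_role_policy()/detach_role_policy() calls are replaced by plain tuples.
def reconcile_policies(current, desired, purge=True):
    to_remove = set(current) - set(desired)
    to_attach = set(desired) - set(current)
    actions = []
    if purge:
        actions.extend(('detach', arn) for arn in sorted(to_remove))
    actions.extend(('attach', arn) for arn in sorted(to_attach))
    return actions, bool(actions)


actions, changed = reconcile_policies(
    current=['arn:aws:iam::aws:policy/ReadOnlyAccess'],
    desired=['arn:aws:iam::aws:policy/PowerUserAccess'],
)
# -> detach ReadOnlyAccess, attach PowerUserAccess, changed=True
print(actions, changed)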
diff --git a/test/support/integration/plugins/modules/k8s.py b/test/support/integration/plugins/modules/k8s.py
deleted file mode 100644
index f3938bf39c..0000000000
--- a/test/support/integration/plugins/modules/k8s.py
+++ /dev/null
@@ -1,274 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2018, Chris Houseknecht <@chouseknecht>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-
-module: k8s
-
-short_description: Manage Kubernetes (K8s) objects
-
-version_added: "2.6"
-
-author:
- - "Chris Houseknecht (@chouseknecht)"
- - "Fabian von Feilitzsch (@fabianvf)"
-
-description:
- - Use the OpenShift Python client to perform CRUD operations on K8s objects.
- - Pass the object definition from a source file or inline. See examples for reading
- files and using Jinja templates or vault-encrypted files.
- - Access to the full range of K8s APIs.
- - Use the M(k8s_info) module to obtain a list of items about an object of type C(kind)
- - Authenticate using either a config file, certificates, password or token.
- - Supports check mode.
-
-extends_documentation_fragment:
- - k8s_state_options
- - k8s_name_options
- - k8s_resource_options
- - k8s_auth_options
-
-notes:
- - If your OpenShift Python library is not 0.9.0 or newer and you are trying to
- remove an item from an associative array/dictionary, for example a label or
- an annotation, you will need to explicitly set the value of the item to be
- removed to `null`. Simply deleting the entry in the dictionary will not
- remove it from openshift or kubernetes.
-
-options:
- merge_type:
- description:
- - Whether to override the default patch merge approach with a specific type. By default, the strategic
- merge will typically be used.
- - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
- want to use C(merge) if you see "strategic merge patch format is not supported"
- - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
- - Requires openshift >= 0.6.2
- - If more than one merge_type is given, the merge_types will be tried in order
- - If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
- on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
- is simply C(strategic-merge).
- - Mutually exclusive with C(apply).
- choices:
- - json
- - merge
- - strategic-merge
- type: list
- version_added: "2.7"
- wait:
- description:
- - Whether to wait for certain resource kinds to end up in the desired state. By default the module exits once Kubernetes has
- received the request (see the illustrative polling sketch after this documentation block).
- - Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for C(state=absent) for all resource kinds.
- - For resource kinds without an implementation, C(wait) returns immediately unless C(wait_condition) is set.
- default: no
- type: bool
- version_added: "2.8"
- wait_sleep:
- description:
- - Number of seconds to sleep between checks.
- default: 5
- version_added: "2.9"
- wait_timeout:
- description:
- - How long in seconds to wait for the resource to end up in the desired state. Ignored if C(wait) is not set.
- default: 120
- version_added: "2.8"
- wait_condition:
- description:
- - Specifies a custom condition on the status to wait for. Ignored if C(wait) is not set or is set to False.
- suboptions:
- type:
- description:
- - The type of condition to wait for. For example, the C(Pod) resource will set the C(Ready) condition (among others)
- - Required if you are specifying a C(wait_condition). If left empty, the C(wait_condition) field will be ignored.
- - The possible types for a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
- for a given resource to see possible choices.
- status:
- description:
- - The value of the status field in your desired condition.
- - For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(Unknown) status.
- choices:
- - True
- - False
- - Unknown
- reason:
- description:
- - The value of the reason field in your desired condition.
- - For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(DeploymentPaused) reason.
- - The possible reasons in a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
- for a given resource to see possible choices.
- version_added: "2.8"
- validate:
- description:
- - How (if at all) to validate the resource definition against the Kubernetes schema.
- Requires the kubernetes-validate python module.
- suboptions:
- fail_on_error:
- description: Whether to fail on validation errors.
- required: yes
- type: bool
- version:
- description: Version of Kubernetes to validate against. Defaults to the Kubernetes server version.
- strict:
- description: Whether to fail when passing unexpected properties.
- default: no
- type: bool
- version_added: "2.8"
- append_hash:
- description:
- - Whether to append a hash to a resource name for immutability purposes
- - Applies only to ConfigMap and Secret resources
- - The parameter will be silently ignored for other resource kinds
- - The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash
- will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including
- the generated hash and append_hash=no)
- type: bool
- version_added: "2.8"
- apply:
- description:
- - C(apply) compares the desired resource definition with the previously supplied resource definition,
- ignoring properties that are automatically generated.
- - C(apply) works better with Services than 'force=yes'.
- - Mutually exclusive with C(merge_type).
- type: bool
- version_added: "2.9"
-
-requirements:
- - "python >= 2.7"
- - "openshift >= 0.6"
- - "PyYAML >= 3.11"
-'''
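The wait, wait_sleep, wait_timeout and wait_condition options documented above boil down to a poll-until-condition loop. The sketch below is purely illustrative and is not the module's implementation; wait_for_condition() and pod_is_ready() are hypothetical helpers, and the Pod status shape shown in pod_is_ready() is only an assumption about typical Kubernetes condition objects.

import time

def wait_for_condition(get_resource, matches, timeout=120, sleep=5):
    """Poll get_resource() every `sleep` seconds until matches(resource) is
    True or `timeout` seconds have elapsed; mirrors wait/wait_sleep/wait_timeout."""
    start = time.time()
    while True:
        resource = get_resource()
        if matches(resource):
            return True, resource, int(time.time() - start)
        if time.time() - start >= timeout:
            return False, resource, int(time.time() - start)
        time.sleep(sleep)

def pod_is_ready(pod):
    # Rough equivalent of wait_condition: {type: Ready, status: "True"} on a Pod.
    for cond in (pod.get('status', {}).get('conditions') or []):
        if cond.get('type') == 'Ready' and cond.get('status') == 'True':
            return True
    return False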
-
-EXAMPLES = '''
-- name: Create a k8s namespace
- k8s:
- name: testing
- api_version: v1
- kind: Namespace
- state: present
-
-- name: Create a Service object from an inline definition
- k8s:
- state: present
- definition:
- apiVersion: v1
- kind: Service
- metadata:
- name: web
- namespace: testing
- labels:
- app: galaxy
- service: web
- spec:
- selector:
- app: galaxy
- service: web
- ports:
- - protocol: TCP
- targetPort: 8000
- name: port-8000-tcp
- port: 8000
-
-- name: Remove an existing Service object
- k8s:
- state: absent
- api_version: v1
- kind: Service
- namespace: testing
- name: web
-
-# Passing the object definition from a file
-
-- name: Create a Deployment by reading the definition from a local file
- k8s:
- state: present
- src: /testing/deployment.yml
-
-- name: >-
- Read definition file from the Ansible controller file system.
- If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
- k8s:
- state: present
- definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
-
-- name: Read definition file from the Ansible controller file system after Jinja templating
- k8s:
- state: present
- definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
-
-- name: fail on validation errors
- k8s:
- state: present
- definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
- validate:
- fail_on_error: yes
-
-- name: warn on validation errors, check for unexpected properties
- k8s:
- state: present
- definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
- validate:
- fail_on_error: no
- strict: yes
-'''
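As noted in the documentation above, with openshift < 0.9.0 a label or annotation is only removed when it is sent with an explicit null value. The snippet below is a minimal, assumed example of such a definition expressed as a Python dict (the same shape the YAML examples above produce); the label name is made up for illustration.

# Illustrative only: explicitly null-ing a label so the merge patch drops it.
definition = {
    'apiVersion': 'v1',
    'kind': 'Namespace',
    'metadata': {
        'name': 'testing',
        'labels': {
            'obsolete-label': None,  # None serializes to null, asking the API to remove the key
        },
    },
}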
-
-RETURN = '''
-result:
- description:
- - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
- returned: success
- type: complex
- contains:
- api_version:
- description: The versioned schema of this representation of an object.
- returned: success
- type: str
- kind:
- description: Represents the REST resource this object represents.
- returned: success
- type: str
- metadata:
- description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
- returned: success
- type: complex
- spec:
- description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
- returned: success
- type: complex
- status:
- description: Current status details for the object.
- returned: success
- type: complex
- items:
- description: Returned only when multiple yaml documents are passed to src or resource_definition
- returned: when resource_definition or src contains list of objects
- type: list
- duration:
- description: elapsed time of task in seconds
- returned: when C(wait) is true
- type: int
- sample: 48
-'''
-
-from ansible.module_utils.k8s.raw import KubernetesRawModule
-
-
-def main():
- KubernetesRawModule().execute_module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/k8s_info.py b/test/support/integration/plugins/modules/k8s_info.py
deleted file mode 100644
index f480bcedc3..0000000000
--- a/test/support/integration/plugins/modules/k8s_info.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2018, Will Thames <@willthames>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: k8s_info
-
-short_description: Describe Kubernetes (K8s) objects
-
-version_added: "2.7"
-
-author:
- - "Will Thames (@willthames)"
-
-description:
- - Use the OpenShift Python client to perform read operations on K8s objects.
- - Access to the full range of K8s APIs.
- - Authenticate using either a config file, certificates, password or token.
- - Supports check mode.
- - This module was called C(k8s_facts) before Ansible 2.9. The usage did not change.
-
-options:
- api_version:
- description:
- - Use to specify the API version. Use in conjunction with I(kind), I(name), and I(namespace) to identify a
- specific object.
- default: v1
- aliases:
- - api
- - version
- kind:
- description:
- - Use to specify an object model. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
- specific object.
- required: yes
- name:
- description:
- - Use to specify an object name. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
- specific object.
- namespace:
- description:
- - Use to specify an object namespace. Use in conjunction with I(api_version), I(kind), and I(name)
- to identify a specific object.
- label_selectors:
- description: List of label selectors to use to filter results
- field_selectors:
- description: List of field selectors to use to filter results
-
-extends_documentation_fragment:
- - k8s_auth_options
-
-requirements:
- - "python >= 2.7"
- - "openshift >= 0.6"
- - "PyYAML >= 3.11"
-'''
-
-EXAMPLES = '''
-- name: Get an existing Service object
- k8s_info:
- api_version: v1
- kind: Service
- name: web
- namespace: testing
- register: web_service
-
-- name: Get a list of all service objects
- k8s_info:
- api_version: v1
- kind: Service
- namespace: testing
- register: service_list
-
-- name: Get a list of all pods from any namespace
- k8s_info:
- kind: Pod
- register: pod_list
-
-- name: Search for all Pods labelled app=web
- k8s_info:
- kind: Pod
- label_selectors:
- - app = web
- - tier in (dev, test)
-
-- name: Search for all running pods
- k8s_info:
- kind: Pod
- field_selectors:
- - status.phase=Running
-'''
-
-RETURN = '''
-resources:
- description:
- - The object(s) that exist.
- returned: success
- type: complex
- contains:
- api_version:
- description: The versioned schema of this representation of an object.
- returned: success
- type: str
- kind:
- description: Represents the REST resource this object represents.
- returned: success
- type: str
- metadata:
- description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
- returned: success
- type: dict
- spec:
- description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
- returned: success
- type: dict
- status:
- description: Current status details for the object.
- returned: success
- type: dict
-'''
-
-
-from ansible.module_utils.k8s.common import KubernetesAnsibleModule, AUTH_ARG_SPEC
-import copy
-
-
-class KubernetesInfoModule(KubernetesAnsibleModule):
-
- def __init__(self, *args, **kwargs):
- KubernetesAnsibleModule.__init__(self, *args,
- supports_check_mode=True,
- **kwargs)
- if self._name == 'k8s_facts':
- self.deprecate("The 'k8s_facts' module has been renamed to 'k8s_info'",
- version='2.13', collection_name='ansible.builtin')
-
- def execute_module(self):
- self.client = self.get_api_client()
-
- self.exit_json(changed=False,
- **self.kubernetes_facts(self.params['kind'],
- self.params['api_version'],
- self.params['name'],
- self.params['namespace'],
- self.params['label_selectors'],
- self.params['field_selectors']))
-
- @property
- def argspec(self):
- args = copy.deepcopy(AUTH_ARG_SPEC)
- args.update(
- dict(
- kind=dict(required=True),
- api_version=dict(default='v1', aliases=['api', 'version']),
- name=dict(),
- namespace=dict(),
- label_selectors=dict(type='list', default=[]),
- field_selectors=dict(type='list', default=[]),
- )
- )
- return args
-
-
-def main():
- KubernetesInfoModule().execute_module()
-
-
-if __name__ == '__main__':
- main()
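A note on the label_selectors and field_selectors parameters used above: the Kubernetes API takes label and field selectors as single comma-separated strings, so list-valued parameters are typically flattened before the request is made. The helper below is a hypothetical sketch of that flattening, not code from this module.

def build_selectors(label_selectors=None, field_selectors=None):
    """Flatten list-valued selectors into the comma-separated strings the
    Kubernetes API expects for labelSelector / fieldSelector."""
    return {
        'label_selector': ','.join(label_selectors or []),
        'field_selector': ','.join(field_selectors or []),
    }

# build_selectors(['app = web', 'tier in (dev, test)'])
# -> {'label_selector': 'app = web,tier in (dev, test)', 'field_selector': ''}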
diff --git a/test/support/integration/plugins/modules/python_requirements_info.py b/test/support/integration/plugins/modules/python_requirements_info.py
deleted file mode 100644
index 5b5c3e50fa..0000000000
--- a/test/support/integration/plugins/modules/python_requirements_info.py
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
-module: python_requirements_info
-short_description: Show python path and assert dependency versions
-description:
- - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
- - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.7"
-options:
- dependencies:
- description: >
- A list of version-likes or module names to check for installation.
- Supported operators: <, >, <=, >=, or ==. The bare module name like
- I(ansible), the module with a specific version like I(boto3==1.6.1), or a
- partial version like I(requests>2) are all valid specifications.
-author:
-- Will Thames (@willthames)
-- Ryan Scott Brown (@ryansb)
-'''
-
-EXAMPLES = '''
-- name: show python lib/site paths
- python_requirements_info:
-- name: check for modern boto3 and botocore versions
- python_requirements_info:
- dependencies:
- - boto3>1.6
- - botocore<2
-'''
-
-RETURN = '''
-python:
- description: path to python version used
- returned: always
- type: str
- sample: /usr/local/opt/python@2/bin/python2.7
-python_version:
- description: version of python
- returned: always
- type: str
- sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
-python_system_path:
- description: List of paths python is looking for modules in
- returned: always
- type: list
- sample:
- - /usr/local/opt/python@2/site-packages/
- - /usr/lib/python/site-packages/
- - /usr/lib/python/site-packages/
-valid:
- description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
- returned: always
- type: dict
- sample:
- boto3:
- desired: null
- installed: 1.7.60
- botocore:
- desired: botocore<2
- installed: 1.10.60
-mismatched:
- description: A dictionary of dependencies that did not satisfy the desired version
- returned: always
- type: dict
- sample:
- botocore:
- desired: botocore>2
- installed: 1.10.60
-not_found:
- description: A list of packages that could not be imported at all, and are not installed
- returned: always
- type: list
- sample:
- - boto4
- - requests
-'''
-
-import re
-import sys
-import operator
-
-HAS_DISTUTILS = False
-try:
- import pkg_resources
- from distutils.version import LooseVersion
- HAS_DISTUTILS = True
-except ImportError:
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-
-operations = {
- '<=': operator.le,
- '>=': operator.ge,
- '<': operator.lt,
- '>': operator.gt,
- '==': operator.eq,
-}
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- dependencies=dict(type='list')
- ),
- supports_check_mode=True,
- )
- if module._name == 'python_requirements_facts':
- module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
- version='2.13', collection_name='ansible.builtin')
- if not HAS_DISTUTILS:
- module.fail_json(
- msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
- python=sys.executable,
- python_version=sys.version,
- python_system_path=sys.path,
- )
- pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
-
- results = dict(
- not_found=[],
- mismatched={},
- valid={},
- )
-
- for dep in (module.params.get('dependencies') or []):
- match = pkg_dep_re.match(dep)
- if match is None:
- module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
- pkg, op, version = match.groups()
- if op is not None and op not in operations:
- module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
- try:
- existing = pkg_resources.get_distribution(pkg).version
- except pkg_resources.DistributionNotFound:
- # not there
- results['not_found'].append(pkg)
- continue
- if op is None and version is None:
- results['valid'][pkg] = {
- 'installed': existing,
- 'desired': None,
- }
- elif operations[op](LooseVersion(existing), LooseVersion(version)):
- results['valid'][pkg] = {
- 'installed': existing,
- 'desired': dep,
- }
- else:
- results['mismatched'][pkg] = {
- 'installed': existing,
- 'desired': dep,
- }
-
- module.exit_json(
- python=sys.executable,
- python_version=sys.version,
- python_system_path=sys.path,
- **results
- )
-
-
-if __name__ == '__main__':
- main()
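For reference, the version check above reduces to: split the requirement with the regex, then compare LooseVersion objects with the mapped operator. The standalone sketch below restates that logic under the same assumptions (distutils LooseVersion semantics); check_dep() is a hypothetical helper, not part of the module.

import operator
import re
from distutils.version import LooseVersion

OPS = {'<=': operator.le, '>=': operator.ge, '<': operator.lt,
       '>': operator.gt, '==': operator.eq}
SPEC_RE = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')

def check_dep(spec, installed_version):
    """Return True if installed_version satisfies a spec such as 'boto3>1.6'."""
    pkg, op, wanted = SPEC_RE.match(spec).groups()
    if op is None:  # bare module name: any installed version is acceptable
        return True
    return OPS[op](LooseVersion(installed_version), LooseVersion(wanted))

# check_dep('boto3>1.6', '1.7.60')   -> True
# check_dep('botocore<2', '1.10.60') -> True
# check_dep('botocore>2', '1.10.60') -> False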
diff --git a/test/support/integration/plugins/modules/sts_assume_role.py b/test/support/integration/plugins/modules/sts_assume_role.py
deleted file mode 100644
index cd82a549cb..0000000000
--- a/test/support/integration/plugins/modules/sts_assume_role.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: sts_assume_role
-short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
-description:
- - Assume a role using AWS Security Token Service and obtain temporary credentials.
-version_added: "2.0"
-author:
- - Boris Ekelchik (@bekelchik)
- - Marek Piatek (@piontas)
-options:
- role_arn:
- description:
- - The Amazon Resource Name (ARN) of the role that the caller is
- assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs).
- required: true
- type: str
- role_session_name:
- description:
- - Name of the role's session - will be used by CloudTrail.
- required: true
- type: str
- policy:
- description:
- - Supplemental policy to use in addition to assumed role's policies.
- type: str
- duration_seconds:
- description:
- - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours).
- - The maximum depends on the IAM role's maximum session duration setting.
- - By default, the value is set to 3600 seconds.
- type: int
- external_id:
- description:
- - A unique identifier that is used by third parties to assume a role in their customers' accounts.
- type: str
- mfa_serial_number:
- description:
- - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
- type: str
- mfa_token:
- description:
- - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
- type: str
-notes:
- - To use the assumed role in a subsequent playbook task you must pass the returned access_key, secret_key and session_token.
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
- - botocore
- - python >= 2.6
-'''
-
-RETURN = '''
-sts_creds:
- description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token
- returned: always
- type: dict
- sample:
- access_key: XXXXXXXXXXXXXXXXXXXX
- expiration: 2017-11-11T11:11:11+00:00
- secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-sts_user:
- description: The Amazon Resource Name (ARN) and the assumed role ID
- returned: always
- type: dict
- sample:
- arn: arn:aws:sts::123456789012:assumed-role/demo/Bob
- assumed_role_id: ARO123EXAMPLE123:Bob
-changed:
- description: True if obtaining the credentials succeeds
- type: bool
- returned: always
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
-- sts_assume_role:
- role_arn: "arn:aws:iam::123456789012:role/someRole"
- role_session_name: "someRoleSession"
- register: assumed_role
-
-# Use the assumed role above to tag an instance in account 123456789012
-- ec2_tag:
- aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
- aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
- security_token: "{{ assumed_role.sts_creds.session_token }}"
- resource: i-xyzxyz01
- state: present
- tags:
- MyNewTag: value
-
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, ParamValidationError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def _parse_response(response):
- credentials = response.get('Credentials', {})
- user = response.get('AssumedRoleUser', {})
-
- sts_cred = {
- 'access_key': credentials.get('AccessKeyId'),
- 'secret_key': credentials.get('SecretAccessKey'),
- 'session_token': credentials.get('SessionToken'),
- 'expiration': credentials.get('Expiration')
-
- }
- sts_user = camel_dict_to_snake_dict(user)
- return sts_cred, sts_user
-
-
-def assume_role_policy(connection, module):
- params = {
- 'RoleArn': module.params.get('role_arn'),
- 'RoleSessionName': module.params.get('role_session_name'),
- 'Policy': module.params.get('policy'),
- 'DurationSeconds': module.params.get('duration_seconds'),
- 'ExternalId': module.params.get('external_id'),
- 'SerialNumber': module.params.get('mfa_serial_number'),
- 'TokenCode': module.params.get('mfa_token')
- }
- changed = False
-
- kwargs = dict((k, v) for k, v in params.items() if v is not None)
-
- try:
- response = connection.assume_role(**kwargs)
- changed = True
- except (ClientError, ParamValidationError) as e:
- module.fail_json_aws(e)
-
- sts_cred, sts_user = _parse_response(response)
- module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user)
-
-
-def main():
- argument_spec = dict(
- role_arn=dict(required=True),
- role_session_name=dict(required=True),
- duration_seconds=dict(required=False, default=None, type='int'),
- external_id=dict(required=False, default=None),
- policy=dict(required=False, default=None),
- mfa_serial_number=dict(required=False, default=None),
- mfa_token=dict(required=False, default=None)
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
-
- connection = module.client('sts')
-
- assume_role_policy(connection, module)
-
-
-if __name__ == '__main__':
- main()
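Outside of Ansible, the credentials this module returns in sts_creds map directly onto a boto3 session. The snippet below is a minimal sketch assuming boto3 is installed; session_from_sts_creds() and the region default are illustrative choices, not part of the module.

import boto3

def session_from_sts_creds(sts_creds, region_name='us-east-1'):
    """Build a boto3 Session from the access_key / secret_key / session_token
    trio reported back in sts_creds."""
    return boto3.session.Session(
        aws_access_key_id=sts_creds['access_key'],
        aws_secret_access_key=sts_creds['secret_key'],
        aws_session_token=sts_creds['session_token'],
        region_name=region_name,
    )

# s3 = session_from_sts_creds(assumed_role['sts_creds']).client('s3')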