summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
authorSloane Hertel <19572925+s-hertel@users.noreply.github.com>2022-05-31 14:30:17 -0400
committerGitHub <noreply@github.com>2022-05-31 14:30:17 -0400
commite6075109d0374d1ea476a25043c69ec2bdfee365 (patch)
treea7a3a01eb29916c7467a1ed38111a178f0e33c5e /test
parentbd849b30762cf62135fa243b9788c9497a5964e2 (diff)
downloadansible-e6075109d0374d1ea476a25043c69ec2bdfee365.tar.gz
Remove incidental_inventory_aws_ec2 and supporting plugins (#77877)
* Remove aws/2.7 and aws/3.6 from incidental test matrix entries * Remove aws.sh symlink
Diffstat (limited to 'test')
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/aliases3
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml11
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml9
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml64
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml62
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml39
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml9
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml18
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml91
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml79
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml74
-rwxr-xr-xtest/integration/targets/incidental_inventory_aws_ec2/runme.sh39
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml12
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml12
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml20
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml0
-rw-r--r--test/sanity/ignore.txt7
-rw-r--r--test/support/integration/plugins/inventory/aws_ec2.py760
-rw-r--r--test/support/integration/plugins/module_utils/aws/__init__.py0
-rw-r--r--test/support/integration/plugins/module_utils/aws/core.py335
-rw-r--r--test/support/integration/plugins/module_utils/aws/iam.py49
-rw-r--r--test/support/integration/plugins/module_utils/aws/s3.py50
-rw-r--r--test/support/integration/plugins/module_utils/aws/waiters.py405
-rw-r--r--test/support/integration/plugins/module_utils/cloud.py217
-rw-r--r--test/support/integration/plugins/module_utils/ec2.py758
-rw-r--r--test/support/integration/plugins/modules/ec2.py1766
-rw-r--r--test/support/integration/plugins/modules/ec2_ami_info.py279
-rw-r--r--test/support/integration/plugins/modules/ec2_group.py1345
-rw-r--r--test/support/integration/plugins/modules/ec2_vpc_net.py524
-rw-r--r--test/support/integration/plugins/modules/ec2_vpc_subnet.py604
l---------test/utils/shippable/incidental/aws.sh1
31 files changed, 0 insertions, 7642 deletions
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/aliases b/test/integration/targets/incidental_inventory_aws_ec2/aliases
deleted file mode 100644
index 41a05d3cf4..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/incidental
-context/controller
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml
deleted file mode 100644
index 8680c38d01..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- vars:
- template_name: "../templates/{{ template | default('inventory.yml') }}"
- tasks:
- - name: write inventory config file
- copy:
- dest: ../test.aws_ec2.yml
- content: "{{ lookup('template', template_name) }}"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml
deleted file mode 100644
index f67fff1a93..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- tasks:
- - name: write inventory config file
- copy:
- dest: ../test.aws_ec2.yml
- content: ""
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml
deleted file mode 100644
index 07b0eec4c5..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- environment: "{{ ansible_test.environment }}"
- tasks:
-
- - block:
-
- # Create VPC, subnet, security group, and find image_id to create instance
-
- - include_tasks: setup.yml
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- # Create new host, add it to inventory and then terminate it without updating the cache
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
- - name: create a new host
- ec2:
- image: '{{ image_id }}'
- exact_count: 1
- count_tag:
- Name: '{{ resource_prefix }}'
- instance_tags:
- Name: '{{ resource_prefix }}'
- instance_type: t2.micro
- wait: yes
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - meta: refresh_inventory
-
- always:
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- ignore_errors: yes
- when: setup_instance is defined
-
- - include_tasks: tear_down.yml
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml
deleted file mode 100644
index 8a9b88937f..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
-- name: get image ID to create an instance
- ec2_ami_info:
- filters:
- architecture: x86_64
- owner-id: '125523088429'
- virtualization-type: hvm
- root-device-type: ebs
- name: 'Fedora-Atomic-27*'
- <<: *aws_connection_info
- register: fedora_images
-
-- set_fact:
- image_id: '{{ fedora_images.images.0.image_id }}'
-
-- name: create a VPC to work in
- ec2_vpc_net:
- cidr_block: 10.10.0.0/24
- state: present
- name: '{{ resource_prefix }}_setup'
- resource_tags:
- Name: '{{ resource_prefix }}_setup'
- <<: *aws_connection_info
- register: setup_vpc
-
-- set_fact:
- vpc_id: '{{ setup_vpc.vpc.id }}'
-
-- name: create a subnet to use for creating an ec2 instance
- ec2_vpc_subnet:
- az: '{{ aws_region }}a'
- tags: '{{ resource_prefix }}_setup'
- vpc_id: '{{ setup_vpc.vpc.id }}'
- cidr: 10.10.0.0/24
- state: present
- resource_tags:
- Name: '{{ resource_prefix }}_setup'
- <<: *aws_connection_info
- register: setup_subnet
-
-- set_fact:
- subnet_id: '{{ setup_subnet.subnet.id }}'
-
-- name: create a security group to use for creating an ec2 instance
- ec2_group:
- name: '{{ resource_prefix }}_setup'
- description: 'created by Ansible integration tests'
- state: present
- vpc_id: '{{ setup_vpc.vpc.id }}'
- <<: *aws_connection_info
- register: setup_sg
-
-- set_fact:
- sg_id: '{{ setup_sg.group_id }}'
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml
deleted file mode 100644
index 4c8240e46d..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-- name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
-- name: remove setup security group
- ec2_group:
- name: '{{ resource_prefix }}_setup'
- description: 'created by Ansible integration tests'
- state: absent
- vpc_id: '{{ vpc_id }}'
- <<: *aws_connection_info
- ignore_errors: yes
-
-- name: remove setup subnet
- ec2_vpc_subnet:
- az: '{{ aws_region }}a'
- tags: '{{ resource_prefix }}_setup'
- vpc_id: '{{ vpc_id }}'
- cidr: 10.10.0.0/24
- state: absent
- resource_tags:
- Name: '{{ resource_prefix }}_setup'
- <<: *aws_connection_info
- ignore_errors: yes
-
-- name: remove setup VPC
- ec2_vpc_net:
- cidr_block: 10.10.0.0/24
- state: absent
- name: '{{ resource_prefix }}_setup'
- resource_tags:
- Name: '{{ resource_prefix }}_setup'
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
deleted file mode 100644
index cc1b9a5a5e..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- tasks:
- - name: assert inventory was not populated by aws_ec2 inventory plugin
- assert:
- that:
- - "'aws_ec2' not in groups"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml
deleted file mode 100644
index d83cb0bfe6..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- tasks:
- - name: assert cache was used to populate inventory
- assert:
- that:
- - "'aws_ec2' in groups"
- - "groups.aws_ec2 | length == 1"
-
- - meta: refresh_inventory
-
- - name: assert refresh_inventory updated the cache
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml
deleted file mode 100644
index 73a67db065..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml
+++ /dev/null
@@ -1,91 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- environment: "{{ ansible_test.environment }}"
- tasks:
-
- - block:
-
- # Create VPC, subnet, security group, and find image_id to create instance
-
- - include_tasks: setup.yml
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- # Create new host, refresh inventory, remove host, refresh inventory
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
- - name: create a new host
- ec2:
- image: '{{ image_id }}'
- exact_count: 1
- count_tag:
- Name: '{{ resource_prefix }}'
- instance_tags:
- Name: '{{ resource_prefix }}'
- instance_type: t2.micro
- wait: yes
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory and is no longer empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "groups.aws_ec2 | length == 1"
- - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- always:
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- ignore_errors: yes
- when: setup_instance is defined
-
- - include_tasks: tear_down.yml
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
deleted file mode 100644
index fdeeeeff42..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- environment: "{{ ansible_test.environment }}"
- tasks:
-
- - block:
-
- # Create VPC, subnet, security group, and find image_id to create instance
-
- - include_tasks: setup.yml
-
- # Create new host, refresh inventory
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
- - name: create a new host
- ec2:
- image: '{{ image_id }}'
- exact_count: 1
- count_tag:
- Name: '{{ resource_prefix }}'
- instance_tags:
- Name: '{{ resource_prefix }}'
- tag1: value1
- tag2: value2
- instance_type: t2.micro
- wait: yes
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - meta: refresh_inventory
-
- - name: register the keyed sg group name
- set_fact:
- sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}"
-
- - name: register one of the keyed tag groups name
- set_fact:
- tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}"
-
- - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
- assert:
- that:
- # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed group (one per tag), arch keyed group, constructed group
- - "groups | length == 9"
- - "groups[tag_group_name] | length == 1"
- - "groups[sg_group_name] | length == 1"
- - "groups.arch_x86_64 | length == 1"
- - "groups.tag_with_name_key | length == 1"
- - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2'
-
- always:
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: "{{ sg_id }}"
- vpc_subnet_id: "{{ subnet_id }}"
- <<: *aws_connection_info
- ignore_errors: yes
- when: setup_instance is defined
-
- - include_tasks: tear_down.yml
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml
deleted file mode 100644
index 6b46599b5b..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-- name: test updating inventory
- block:
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create a new host
- ec2:
- image: "{{ images[aws_region] }}"
- exact_count: 1
- count_tag:
- Name: '{{ resource_prefix }}'
- instance_tags:
- Name: '{{ resource_prefix }}'
- instance_type: t2.micro
- wait: yes
- group_id: '{{ setup_sg.group_id }}'
- vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory and is no longer empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "groups.aws_ec2 | length == 1"
- - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ setup_sg.group_id }}'
- vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
- <<: *aws_connection_info
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- always:
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ setup_sg.group_id }}'
- vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/runme.sh b/test/integration/targets/incidental_inventory_aws_ec2/runme.sh
deleted file mode 100755
index 339be5dd31..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/runme.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-source virtualenv.sh
-
-python -m pip install boto3 boto
-
-# ensure test config is empty
-ansible-playbook playbooks/empty_inventory_config.yml "$@"
-
-export ANSIBLE_INVENTORY_ENABLED=aws_ec2
-
-# test with default inventory file
-ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
-
-export ANSIBLE_INVENTORY=test.aws_ec2.yml
-
-# test empty inventory config
-ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
-
-# generate inventory config and test using it
-ansible-playbook playbooks/create_inventory_config.yml "$@"
-ansible-playbook playbooks/test_populating_inventory.yml "$@"
-
-# generate inventory config with caching and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml'" "$@"
-ansible-playbook playbooks/populate_cache.yml "$@"
-ansible-playbook playbooks/test_inventory_cache.yml "$@"
-
-# remove inventory cache
-rm -r aws_ec2_cache_dir/
-
-# generate inventory config with constructed features and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml'" "$@"
-ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
-
-# cleanup inventory config
-ansible-playbook playbooks/empty_inventory_config.yml "$@"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml
deleted file mode 100644
index 942edb309b..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-plugin: aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
-aws_security_token: '{{ security_token }}'
-regions:
- - '{{ aws_region }}'
-filters:
- tag:Name:
- - '{{ resource_prefix }}'
-hostnames:
- - tag:Name
- - dns-name
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml
deleted file mode 100644
index e35bf9010b..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-plugin: aws_ec2
-cache: True
-cache_plugin: jsonfile
-cache_connection: aws_ec2_cache_dir
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
-aws_security_token: '{{ security_token }}'
-regions:
- - '{{ aws_region }}'
-filters:
- tag:Name:
- - '{{ resource_prefix }}'
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml
deleted file mode 100644
index 6befb4e339..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-plugin: aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
-aws_security_token: '{{ security_token }}'
-regions:
- - '{{ aws_region }}'
-filters:
- tag:Name:
- - '{{ resource_prefix }}'
-keyed_groups:
- - key: 'security_groups|json_query("[].group_id")'
- prefix: 'security_groups'
- - key: 'tags'
- prefix: 'tag'
- - prefix: 'arch'
- key: "architecture"
-compose:
- test_compose_var_sum: tags.tag1 + tags.tag2
-groups:
- tag_with_name_key: "'Name' in (tags | list)"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml b/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml
+++ /dev/null
diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt
index 873fddd9a1..929fa9f0b4 100644
--- a/test/sanity/ignore.txt
+++ b/test/sanity/ignore.txt
@@ -170,13 +170,7 @@ test/integration/targets/win_script/files/test_script_with_splatting.ps1 pslint:
test/lib/ansible_test/_data/requirements/sanity.pslint.ps1 pslint:PSCustomUseLiteralPath # Uses wildcards on purpose
test/lib/ansible_test/_util/target/setup/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath
test/lib/ansible_test/_util/target/setup/requirements.py replace-urlopen
-test/support/integration/plugins/inventory/aws_ec2.py pylint:use-a-generator
-test/support/integration/plugins/modules/ec2_group.py pylint:use-a-generator
test/support/integration/plugins/modules/timezone.py pylint:disallowed-name
-test/support/integration/plugins/module_utils/aws/core.py pylint:property-with-parameters
-test/support/integration/plugins/module_utils/cloud.py future-import-boilerplate
-test/support/integration/plugins/module_utils/cloud.py metaclass-boilerplate
-test/support/integration/plugins/module_utils/cloud.py pylint:isinstance-second-argument-not-valid-type
test/support/integration/plugins/module_utils/compat/ipaddress.py future-import-boilerplate
test/support/integration/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate
test/support/integration/plugins/module_utils/compat/ipaddress.py no-unicode-literals
@@ -304,4 +298,3 @@ lib/ansible/module_utils/compat/_selectors2.py mypy-3.9:assignment # vendored c
lib/ansible/module_utils/compat/_selectors2.py mypy-3.10:assignment # vendored code
lib/ansible/module_utils/compat/_selectors2.py mypy-3.11:assignment # vendored code
lib/ansible/module_utils/compat/_selectors2.py mypy-2.7:attr-defined # vendored code
-test/support/integration/plugins/modules/ec2.py pylint:ansible-deprecated-version
diff --git a/test/support/integration/plugins/inventory/aws_ec2.py b/test/support/integration/plugins/inventory/aws_ec2.py
deleted file mode 100644
index 09c42cf99b..0000000000
--- a/test/support/integration/plugins/inventory/aws_ec2.py
+++ /dev/null
@@ -1,760 +0,0 @@
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: aws_ec2
- plugin_type: inventory
- short_description: EC2 inventory source
- requirements:
- - boto3
- - botocore
- extends_documentation_fragment:
- - inventory_cache
- - constructed
- description:
- - Get inventory hosts from Amazon Web Services EC2.
- - Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
- notes:
- - If no credentials are provided and the control node has an associated IAM instance profile then the
- role will be used for authentication.
- author:
- - Sloane Hertel (@s-hertel)
- options:
- aws_profile:
- description: The AWS profile
- type: str
- aliases: [ boto_profile ]
- env:
- - name: AWS_DEFAULT_PROFILE
- - name: AWS_PROFILE
- aws_access_key:
- description: The AWS access key to use.
- type: str
- aliases: [ aws_access_key_id ]
- env:
- - name: EC2_ACCESS_KEY
- - name: AWS_ACCESS_KEY
- - name: AWS_ACCESS_KEY_ID
- aws_secret_key:
- description: The AWS secret key that corresponds to the access key.
- type: str
- aliases: [ aws_secret_access_key ]
- env:
- - name: EC2_SECRET_KEY
- - name: AWS_SECRET_KEY
- - name: AWS_SECRET_ACCESS_KEY
- aws_security_token:
- description: The AWS security token if using temporary access and secret keys.
- type: str
- env:
- - name: EC2_SECURITY_TOKEN
- - name: AWS_SESSION_TOKEN
- - name: AWS_SECURITY_TOKEN
- plugin:
- description: Token that ensures this is a source file for the plugin.
- required: True
- choices: ['aws_ec2']
- iam_role_arn:
- description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
- credentials with enough privilege to perform the AssumeRole action.
- version_added: '2.9'
- regions:
- description:
- - A list of regions in which to describe EC2 instances.
- - If empty (the default) default this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
- type: list
- default: []
- hostnames:
- description:
- - A list in order of precedence for hostname variables.
- - You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
- - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
- type: list
- default: []
- filters:
- description:
- - A dictionary of filter value pairs.
- - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
- type: dict
- default: {}
- include_extra_api_calls:
- description:
- - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
- - Spot instances may be persistent and instances may have associated events.
- type: bool
- default: False
- version_added: '2.8'
- strict_permissions:
- description:
- - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
- - You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
- type: bool
- default: True
- use_contrib_script_compatible_sanitization:
- description:
- - By default this plugin is using a general group name sanitization to create safe and usable group names for use in Ansible.
- This option allows you to override that, in efforts to allow migration from the old inventory script and
- matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
- To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
- you will need to replace hyphens with underscores via the regex_replace filter for those entries.
- - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
- otherwise the core engine will just use the standard sanitization on top.
- - This is not the default as such names break certain functionality as not all characters are valid Python identifiers
- which group names end up being used as.
- type: bool
- default: False
- version_added: '2.8'
-'''
-
-EXAMPLES = '''
-# Minimal example using environment vars or instance role credentials
-# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
-plugin: aws_ec2
-regions:
- - us-east-1
-
-# Example using filters, ignoring permission errors, and specifying the hostname precedence
-plugin: aws_ec2
-boto_profile: aws_profile
-# Populate inventory with instances in these regions
-regions:
- - us-east-1
- - us-east-2
-filters:
- # All instances with their `Environment` tag set to `dev`
- tag:Environment: dev
- # All dev and QA hosts
- tag:Environment:
- - dev
- - qa
- instance.group-id: sg-xxxxxxxx
-# Ignores 403 errors rather than failing
-strict_permissions: False
-# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
-# inventory_hostname use compose (see example below).
-hostnames:
- - tag:Name=Tag1,Name=Tag2 # Return specific hosts only
- - tag:CustomDNSName
- - dns-name
- - private-ip-address
-
-# Example using constructed features to create groups and set ansible_host
-plugin: aws_ec2
-regions:
- - us-east-1
- - us-west-1
-# keyed_groups may be used to create custom groups
-strict: False
-keyed_groups:
- # Add e.g. x86_64 hosts to an arch_x86_64 group
- - prefix: arch
- key: 'architecture'
- # Add hosts to tag_Name_Value groups for each Name/Value tag pair
- - prefix: tag
- key: tags
- # Add hosts to e.g. instance_type_z3_tiny
- - prefix: instance_type
- key: instance_type
- # Create security_groups_sg_abcd1234 group for each SG
- - key: 'security_groups|json_query("[].group_id")'
- prefix: 'security_groups'
- # Create a group for each value of the Application tag
- - key: tags.Application
- separator: ''
- # Create a group per region e.g. aws_region_us_east_2
- - key: placement.region
- prefix: aws_region
- # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
- - key: tags['Role']
- prefix: foo
- parent_group: "project"
-# Set individual variables with compose
-compose:
- # Use the private IP address to connect to the host
- # (note: this does not modify inventory_hostname, which is set via I(hostnames))
- ansible_host: private_ip_address
-'''
-
-import re
-
-from ansible.errors import AnsibleError
-from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.utils.display import Display
-from ansible.module_utils.six import string_types
-
-try:
- import boto3
- import botocore
-except ImportError:
- raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
-
-display = Display()
-
# The mappings give an array of keys to get from the filter name to the value
# returned by boto3's EC2 describe_instances method.

# Filters satisfied from reservation-level metadata; _get_reservation_details()
# copies these three reservation fields onto each instance dict.
instance_meta_filter_to_boto_attr = {
    'group-id': ('Groups', 'GroupId'),
    'group-name': ('Groups', 'GroupName'),
    'network-interface.attachment.instance-owner-id': ('OwnerId',),
    'owner-id': ('OwnerId',),
    'requester-id': ('RequesterId',),
    'reservation-id': ('ReservationId',),
}

# Filters resolved directly against the per-instance structure returned by
# describe_instances; each tuple is the chain of keys walked by
# _get_boto_attr_chain()/_compile_values().
instance_data_filter_to_boto_attr = {
    'affinity': ('Placement', 'Affinity'),
    'architecture': ('Architecture',),
    'availability-zone': ('Placement', 'AvailabilityZone'),
    'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
    'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
    'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
    'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
    'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
    'client-token': ('ClientToken',),
    'dns-name': ('PublicDnsName',),
    'host-id': ('Placement', 'HostId'),
    'hypervisor': ('Hypervisor',),
    'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
    'image-id': ('ImageId',),
    'instance-id': ('InstanceId',),
    'instance-lifecycle': ('InstanceLifecycle',),
    'instance-state-code': ('State', 'Code'),
    'instance-state-name': ('State', 'Name'),
    'instance-type': ('InstanceType',),
    'instance.group-id': ('SecurityGroups', 'GroupId'),
    'instance.group-name': ('SecurityGroups', 'GroupName'),
    'ip-address': ('PublicIpAddress',),
    'kernel-id': ('KernelId',),
    'key-name': ('KeyName',),
    'launch-index': ('AmiLaunchIndex',),
    'launch-time': ('LaunchTime',),
    'monitoring-state': ('Monitoring', 'State'),
    'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
    'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
    'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
    'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
    'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
    'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
    'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
    'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
    'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
    'network-interface.attachment.instance-id': ('InstanceId',),
    'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
    'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
    'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
    'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
    'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
    'network-interface.description': ('NetworkInterfaces', 'Description'),
    'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
    'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
    'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
    'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
    'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
    'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
    'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
    # 'network-interface.requester-id': (),
    'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
    'network-interface.status': ('NetworkInterfaces', 'Status'),
    'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
    'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
    'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
    'placement-group-name': ('Placement', 'GroupName'),
    'platform': ('Platform',),
    'private-dns-name': ('PrivateDnsName',),
    'private-ip-address': ('PrivateIpAddress',),
    'product-code': ('ProductCodes', 'ProductCodeId'),
    'product-code.type': ('ProductCodes', 'ProductCodeType'),
    'ramdisk-id': ('RamdiskId',),
    'reason': ('StateTransitionReason',),
    'root-device-name': ('RootDeviceName',),
    'root-device-type': ('RootDeviceType',),
    'source-dest-check': ('SourceDestCheck',),
    'spot-instance-request-id': ('SpotInstanceRequestId',),
    'state-reason-code': ('StateReason', 'Code'),
    'state-reason-message': ('StateReason', 'Message'),
    'subnet-id': ('SubnetId',),
    'tag': ('Tags',),
    'tag-key': ('Tags',),
    'tag-value': ('Tags',),
    'tenancy': ('Placement', 'Tenancy'),
    'virtualization-type': ('VirtualizationType',),
    'vpc-id': ('VpcId',),
}
-
-
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    """EC2 dynamic inventory plugin.

    Queries boto3 ``describe_instances`` in one or more regions and turns the
    results into inventory hosts, groups and host variables, then applies the
    constructed-inventory options (``compose``, ``groups``, ``keyed_groups``).
    """

    NAME = 'aws_ec2'

    def __init__(self):
        super(InventoryModule, self).__init__()

        # All groups created by this plugin share a common prefix.
        self.group_prefix = 'aws_ec2_'

        # credentials; populated from the plugin options by _set_credentials()
        self.boto_profile = None
        self.aws_secret_access_key = None
        self.aws_access_key_id = None
        self.aws_security_token = None
        self.iam_role_arn = None

    def _compile_values(self, obj, attr):
        '''
        Recursively extract ``attr`` from a (possibly nested) boto3 structure.

        :param obj: A list or dict of instance attributes
        :param attr: A key
        :return The value(s) found via the attr
        '''
        if obj is None:
            return

        temp_obj = []

        if isinstance(obj, list) or isinstance(obj, tuple):
            for each in obj:
                value = self._compile_values(each, attr)
                if value:
                    temp_obj.append(value)
        else:
            temp_obj = obj.get(attr)

        # Unwrap single-element sequences so scalar attributes stay scalar.
        has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
        if has_indexes and len(temp_obj) == 1:
            return temp_obj[0]

        return temp_obj

    def _get_boto_attr_chain(self, filter_name, instance):
        '''
        Resolve an EC2 filter name to the matching value on an instance dict.

        :param filter_name: The filter
        :param instance: instance dict returned by boto3 ec2 describe_instances()
        :raises AnsibleError: if ``filter_name`` is not a recognized EC2 filter
        '''
        allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
        if filter_name not in allowed_filters:
            raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
                                                                                           allowed_filters))
        if filter_name in instance_data_filter_to_boto_attr:
            boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
        else:
            boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]

        # Walk the key chain, e.g. ('Placement', 'AvailabilityZone').
        instance_value = instance
        for attribute in boto_attr_list:
            instance_value = self._compile_values(instance_value, attribute)
        return instance_value

    def _get_credentials(self):
        '''
        :return A dictionary of boto client credentials
        '''
        boto_params = {}
        for credential in (('aws_access_key_id', self.aws_access_key_id),
                           ('aws_secret_access_key', self.aws_secret_access_key),
                           ('aws_session_token', self.aws_security_token)):
            if credential[1]:
                boto_params[credential[0]] = credential[1]

        return boto_params

    def _get_connection(self, credentials, region='us-east-1'):
        '''
        Create an EC2 client, falling back to profile-only authentication if
        the explicit credentials are incomplete but a profile is configured.

        :param credentials: a dict of boto client credentials
        :param region: the AWS region for the client
        :raises AnsibleError: if no usable set of credentials can be found
        '''
        try:
            connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
        except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
            if self.boto_profile:
                try:
                    connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
                except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
                    raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
            else:
                raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
        return connection

    def _boto3_assume_role(self, credentials, region):
        """
        Assume an IAM role passed by iam_role_arn parameter

        :return: a dict containing the credentials of the assumed role
        """

        iam_role_arn = self.iam_role_arn

        try:
            sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
            sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
            return dict(
                aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
                aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
                aws_session_token=sts_session['Credentials']['SessionToken']
            )
        except botocore.exceptions.ClientError as e:
            raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))

    def _boto3_conn(self, regions):
        '''
        :param regions: A list of regions to create a boto3 client

        Generator that yields a boto3 client and the region
        '''

        credentials = self._get_credentials()
        iam_role_arn = self.iam_role_arn

        if not regions:
            try:
                # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
                client = self._get_connection(credentials)
                resp = client.describe_regions()
                regions = [x['RegionName'] for x in resp.get('Regions', [])]
            except botocore.exceptions.NoRegionError:
                # above seems to fail depending on boto3 version, ignore and lets try something else
                pass

            # fallback to local list hardcoded in boto3 if still no regions
            if not regions:
                session = boto3.Session()
                regions = session.get_available_regions('ec2')

            # I give up, now you MUST give me regions
            if not regions:
                raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')

        for region in regions:
            # NOTE(review): this client is discarded and immediately recreated
            # below; it only serves to surface credential errors early — TODO
            # confirm before removing.
            connection = self._get_connection(credentials, region)
            try:
                if iam_role_arn is not None:
                    assumed_credentials = self._boto3_assume_role(credentials, region)
                else:
                    assumed_credentials = credentials
                connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
            except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
                if self.boto_profile:
                    try:
                        connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
                    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
                        raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
                else:
                    raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
            yield connection, region

    def _get_instances_by_region(self, regions, filters, strict_permissions):
        '''
        :param regions: a list of regions in which to describe instances
        :param filters: a list of boto3 filter dictionaries
        :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
        :return A list of instance dictionaries
        '''
        all_instances = []

        for connection, region in self._boto3_conn(regions):
            try:
                # By default find non-terminated/terminating instances
                if not any([f['Name'] == 'instance-state-name' for f in filters]):
                    filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
                paginator = connection.get_paginator('describe_instances')
                reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
                instances = []
                for r in reservations:
                    new_instances = r['Instances']
                    for instance in new_instances:
                        # Merge reservation-level fields (OwnerId etc.) onto
                        # each instance so they are filterable/usable later.
                        instance.update(self._get_reservation_details(r))
                        if self.get_option('include_extra_api_calls'):
                            instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
                    instances.extend(new_instances)
            except botocore.exceptions.ClientError as e:
                # A 403 in a single region is tolerated unless strict_permissions is set.
                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
                    instances = []
                else:
                    raise AnsibleError("Failed to describe instances: %s" % to_native(e))
            except botocore.exceptions.BotoCoreError as e:
                raise AnsibleError("Failed to describe instances: %s" % to_native(e))

            all_instances.extend(instances)

        # Deterministic ordering keeps inventory output stable between runs.
        return sorted(all_instances, key=lambda x: x['InstanceId'])

    def _get_reservation_details(self, reservation):
        '''Return the reservation-level fields merged onto each instance.'''
        return {
            'OwnerId': reservation['OwnerId'],
            'RequesterId': reservation.get('RequesterId', ''),
            'ReservationId': reservation['ReservationId']
        }

    def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
        '''
        Fetch the scheduled-events list and (for spot instances) whether the
        spot request is persistent; both require extra API calls.

        :param connection: a boto3 EC2 client
        :param instance_id: the instance to describe
        :param spot_instance: the instance's SpotInstanceRequestId, if any
        '''
        host_vars = {'Events': '', 'Persistent': False}
        try:
            kwargs = {'InstanceIds': [instance_id]}
            host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            if not self.get_option('strict_permissions'):
                pass
            else:
                raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
        if spot_instance:
            try:
                kwargs = {'SpotInstanceRequestIds': [spot_instance]}
                host_vars['Persistent'] = bool(
                    connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
                )
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                if not self.get_option('strict_permissions'):
                    pass
                else:
                    raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
        return host_vars

    def _get_tag_hostname(self, preference, instance):
        '''
        Derive a hostname from instance tags per a 'tag:...' preference.

        :param preference: a string of the form 'tag:Name' or 'tag:Name=Value[,Name2=Value2]'
        :param instance: instance dict returned by boto3 ec2 describe_instances()
        :return the derived hostname, or None if no tag matched
        '''
        tag_hostnames = preference.split('tag:', 1)[1]
        if ',' in tag_hostnames:
            tag_hostnames = tag_hostnames.split(',')
        else:
            tag_hostnames = [tag_hostnames]
        tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
        for v in tag_hostnames:
            if '=' in v:
                # Split on the first '=' only, so tag values that themselves
                # contain '=' don't raise a ValueError on unpacking.
                tag_name, tag_value = v.split('=', 1)
                if tags.get(tag_name) == tag_value:
                    return to_text(tag_name) + "_" + to_text(tag_value)
            else:
                tag_value = tags.get(v)
                if tag_value:
                    return to_text(tag_value)
        return None

    def _get_hostname(self, instance, hostnames):
        '''
        :param instance: an instance dict returned by boto3 ec2 describe_instances()
        :param hostnames: a list of hostname destination variables in order of preference
        :return the preferred identifer for the host
        '''
        if not hostnames:
            hostnames = ['dns-name', 'private-dns-name']

        hostname = None
        for preference in hostnames:
            # NOTE(review): the substring test also matches filter names such
            # as 'tag-key', which then fail the startswith check below.
            if 'tag' in preference:
                if not preference.startswith('tag:'):
                    raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
                hostname = self._get_tag_hostname(preference, instance)
            else:
                hostname = self._get_boto_attr_chain(preference, instance)
            if hostname:
                break
        if hostname:
            # ':' is not usable in an inventory hostname, so sanitize it away.
            if ':' in to_text(hostname):
                return self._sanitize_group_name((to_text(hostname)))
            else:
                return to_text(hostname)

    def _query(self, regions, filters, strict_permissions):
        '''
        :param regions: a list of regions to query
        :param filters: a list of boto3 filter dictionaries
        :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
        :return a dict mapping the top-level group name to its instance list
        '''
        return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}

    def _populate(self, groups, hostnames):
        '''
        Create each group, attach its hosts, and parent it under 'all'.

        :param groups: a dict mapping group names to instance lists
        :param hostnames: a list of hostname destination variables in order of preference
        '''
        for group in groups:
            group = self.inventory.add_group(group)
            self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
            self.inventory.add_child('all', group)

    def _add_hosts(self, hosts, group, hostnames):
        '''
        :param hosts: a list of hosts to be added to a group
        :param group: the name of the group to which the hosts belong
        :param hostnames: a list of hostname destination variables in order of preference
        '''
        for host in hosts:
            # Hostname is resolved against the original CamelCase keys.
            hostname = self._get_hostname(host, hostnames)

            host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
            host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))

            # Allow easier grouping by region
            host['placement']['region'] = host['placement']['availability_zone'][:-1]

            # Skip instances for which no hostname preference resolved.
            if not hostname:
                continue
            self.inventory.add_host(hostname, group=group)
            for hostvar, hostval in host.items():
                self.inventory.set_variable(hostname, hostvar, hostval)

            # Use constructed if applicable

            strict = self.get_option('strict')

            # Composed variables
            self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)

            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)

            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)

    def _set_credentials(self):
        '''
        Populate the credential attributes from plugin options, falling back
        to the botocore credential chain (env vars, shared config, etc.).

        :raises AnsibleError: if no usable credentials can be found
        '''

        self.boto_profile = self.get_option('aws_profile')
        self.aws_access_key_id = self.get_option('aws_access_key')
        self.aws_secret_access_key = self.get_option('aws_secret_key')
        self.aws_security_token = self.get_option('aws_security_token')
        self.iam_role_arn = self.get_option('iam_role_arn')

        if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
            session = botocore.session.get_session()
            try:
                credentials = session.get_credentials().get_frozen_credentials()
            except AttributeError:
                # get_credentials() returned None: nothing in the chain either
                pass
            else:
                self.aws_access_key_id = credentials.access_key
                self.aws_secret_access_key = credentials.secret_key
                self.aws_security_token = credentials.token

        if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
            raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
                               "inventory configuration file or set them as environment variables.")

    def verify_file(self, path):
        '''
        :param path: the path to the inventory config file
        :return True if the file is a usable aws_ec2 inventory config
        '''
        if super(InventoryModule, self).verify_file(path):
            if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
                return True
        display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
        return False

    def parse(self, inventory, loader, path, cache=True):
        '''Entry point: read config, query AWS (or the cache), populate inventory.'''

        super(InventoryModule, self).parse(inventory, loader, path)

        self._read_config_data(path)

        if self.get_option('use_contrib_script_compatible_sanitization'):
            self._sanitize_group_name = self._legacy_script_compatible_group_sanitization

        self._set_credentials()

        # get user specifications
        regions = self.get_option('regions')
        filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
        hostnames = self.get_option('hostnames')
        strict_permissions = self.get_option('strict_permissions')

        cache_key = self.get_cache_key(path)
        # false when refresh_cache or --flush-cache is used
        if cache:
            # get the user-specified directive
            cache = self.get_option('cache')

        # Generate inventory
        cache_needs_update = False
        if cache:
            try:
                results = self._cache[cache_key]
            except KeyError:
                # if cache expires or cache file doesn't exist
                cache_needs_update = True

        if not cache or cache_needs_update:
            results = self._query(regions, filters, strict_permissions)

        self._populate(results, hostnames)

        # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
        # when the user is using caching, update the cached inventory
        if cache_needs_update or (not cache and self.get_option('cache')):
            self._cache[cache_key] = results

    @staticmethod
    def _legacy_script_compatible_group_sanitization(name):

        # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
        regex = re.compile(r"[^A-Za-z0-9\_\-]")

        return regex.sub('_', name)
-
-
def ansible_dict_to_boto3_filter_list(filters_dict):

    """ Convert an Ansible dict of filters to list of dicts that boto3 can use
    Args:
        filters_dict (dict): Dict of AWS filters.
    Basic Usage:
        >>> filters = {'some-aws-id': 'i-01234567'}
        >>> ansible_dict_to_boto3_filter_list(filters)
        [
            {
                'Name': 'some-aws-id',
                'Values': [
                    'i-01234567',
                ]
            }
        ]
    Returns:
        List: List of AWS filters and their values
    """

    filters_list = []
    for k, v in filters_dict.items():
        filter_dict = {'Name': k}
        if isinstance(v, string_types):
            # boto3 requires Values to be a list even for a single value.
            filter_dict['Values'] = [v]
        else:
            filter_dict['Values'] = v

        filters_list.append(filter_dict)

    return filters_list
-
-
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):

    """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
    Args:
        tags_list (list): List of dicts representing AWS tags.
        tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
        tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
    Basic Usage:
        >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
        >>> boto3_tag_list_to_ansible_dict(tags_list)
        {
            'MyTagKey': 'MyTagValue',
        }
    Returns:
        Dict: Dict of key:value pairs representing AWS tags
    Raises:
        ValueError: if the tag dicts match none of the candidate key names
    """

    if tag_name_key_name and tag_value_key_name:
        tag_candidates = {tag_name_key_name: tag_value_key_name}
    else:
        # boto3 is inconsistent about the capitalisation of tag keys across services
        tag_candidates = {'key': 'value', 'Key': 'Value'}

    # An empty list (or None) simply means no tags.
    if not tags_list:
        return {}
    for k, v in tag_candidates.items():
        if k in tags_list[0] and v in tags_list[0]:
            return dict((tag[k], tag[v]) for tag in tags_list)
    raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
diff --git a/test/support/integration/plugins/module_utils/aws/__init__.py b/test/support/integration/plugins/module_utils/aws/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/support/integration/plugins/module_utils/aws/__init__.py
+++ /dev/null
diff --git a/test/support/integration/plugins/module_utils/aws/core.py b/test/support/integration/plugins/module_utils/aws/core.py
deleted file mode 100644
index 909d0396d4..0000000000
--- a/test/support/integration/plugins/module_utils/aws/core.py
+++ /dev/null
@@ -1,335 +0,0 @@
-#
-# Copyright 2017 Michael De La Rue | Ansible
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-"""This module adds shared support for generic Amazon AWS modules
-
-**This code is not yet ready for use in user modules. As of 2017**
-**and through to 2018, the interface is likely to change**
-**aggressively as the exact correct interface for ansible AWS modules**
-**is identified. In particular, until this notice goes away or is**
-**changed, methods may disappear from the interface. Please don't**
-**publish modules using this except directly to the main Ansible**
-**development repository.**
-
-In order to use this module, include it as part of a custom
-module as shown below.
-
- from ansible.module_utils.aws import AnsibleAWSModule
- module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean
- mutually_exclusive=list1, required_together=list2)
-
-The 'AnsibleAWSModule' module provides similar, but more restricted,
-interfaces to the normal Ansible module. It also includes the
-additional methods for connecting to AWS using the standard module arguments
-
- m.resource('lambda') # - get an AWS connection as a boto3 resource.
-
-or
-
- m.client('sts') # - get an AWS connection as a boto3 client.
-
-To make use of AWSRetry easier, it can now be wrapped around any call from a
-module-created client. To add retries to a client, create a client:
-
- m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
-
-Any calls from that client can be made to use the decorator passed at call-time
-using the `aws_retry` argument. By default, no retries are used.
-
- ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
- ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
-
-The call will be retried the specified number of times, so the calling functions
-don't need to be wrapped in the backoff decorator.
-"""
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import re
-import logging
-import traceback
-from functools import wraps
-from ansible.module_utils.compat.version import LooseVersion
-
-try:
- from cStringIO import StringIO
-except ImportError:
- # Python 3
- from io import StringIO
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils._text import to_native
-from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn
-from ansible.module_utils.ec2 import get_aws_connection_info, get_aws_region
-
-# We will also export HAS_BOTO3 so end user modules can use it.
-__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code')
-
-
class AnsibleAWSModule(object):
    """An ansible module class for AWS modules

    AnsibleAWSModule provides an a class for building modules which
    connect to Amazon Web Services.  The interface is currently more
    restricted than the basic module class with the aim that later the
    basic module class can be reduced.  If you find that any key
    feature is missing please contact the author/Ansible AWS team
    (available on #ansible-aws on IRC) to request the additional
    features needed.
    """
    # Settings consumed by this wrapper itself; everything else in kwargs is
    # forwarded to the underlying module class.
    default_settings = {
        "default_args": True,
        "check_boto3": True,
        "auto_retry": True,
        "module_class": AnsibleModule
    }

    def __init__(self, **kwargs):
        # Split our own settings out of the kwargs destined for module_class.
        local_settings = {}
        for key in AnsibleAWSModule.default_settings:
            try:
                local_settings[key] = kwargs.pop(key)
            except KeyError:
                local_settings[key] = AnsibleAWSModule.default_settings[key]
        self.settings = local_settings

        if local_settings["default_args"]:
            # ec2_argument_spec contains the region so we use that; there's a patch coming which
            # will add it to aws_argument_spec so if that's accepted then later we should change
            # over
            argument_spec_full = ec2_argument_spec()
            try:
                argument_spec_full.update(kwargs["argument_spec"])
            except (TypeError, NameError):
                pass
            kwargs["argument_spec"] = argument_spec_full

        # Honor a caller-supplied "module_class"; the previous code popped the
        # setting but then always instantiated the class default.
        self._module = local_settings["module_class"](**kwargs)

        if local_settings["check_boto3"] and not HAS_BOTO3:
            self._module.fail_json(
                msg=missing_required_lib('botocore or boto3'))

        self.check_mode = self._module.check_mode
        self._diff = self._module._diff
        self._name = self._module._name

        # Capture botocore's endpoint log so resource_actions can be reported.
        self._botocore_endpoint_log_stream = StringIO()
        self.logger = None
        if self.params.get('debug_botocore_endpoint_logs'):
            self.logger = logging.getLogger('botocore.endpoint')
            self.logger.setLevel(logging.DEBUG)
            self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))

    @property
    def params(self):
        return self._module.params

    def _get_resource_action_list(self):
        """Parse the captured botocore endpoint log into 'service:Operation' strings."""
        actions = []
        for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
            ln = ln.strip()
            if not ln:
                continue
            found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
            if found_operational_request:
                operation_request = found_operational_request.group(0)[20:-1]
                resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
                actions.append("{0}:{1}".format(resource, operation_request))
        return list(set(actions))

    def exit_json(self, *args, **kwargs):
        if self.params.get('debug_botocore_endpoint_logs'):
            kwargs['resource_actions'] = self._get_resource_action_list()
        return self._module.exit_json(*args, **kwargs)

    def fail_json(self, *args, **kwargs):
        if self.params.get('debug_botocore_endpoint_logs'):
            kwargs['resource_actions'] = self._get_resource_action_list()
        return self._module.fail_json(*args, **kwargs)

    def debug(self, *args, **kwargs):
        return self._module.debug(*args, **kwargs)

    def warn(self, *args, **kwargs):
        return self._module.warn(*args, **kwargs)

    def deprecate(self, *args, **kwargs):
        return self._module.deprecate(*args, **kwargs)

    def boolean(self, *args, **kwargs):
        return self._module.boolean(*args, **kwargs)

    def md5(self, *args, **kwargs):
        return self._module.md5(*args, **kwargs)

    def client(self, service, retry_decorator=None):
        """Return a boto3 client; optionally wrapped for per-call retries."""
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
        conn = boto3_conn(self, conn_type='client', resource=service,
                          region=region, endpoint=ec2_url, **aws_connect_kwargs)
        return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)

    def resource(self, service):
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
        return boto3_conn(self, conn_type='resource', resource=service,
                          region=region, endpoint=ec2_url, **aws_connect_kwargs)

    @property
    def region(self, boto3=True):
        # NOTE(review): extra parameters on a property getter are never
        # passed by attribute access; 'boto3' here is effectively always True.
        return get_aws_region(self, boto3)

    def fail_json_aws(self, exception, msg=None):
        """call fail_json with processed exception

        function for converting exceptions thrown by AWS SDK modules,
        botocore, boto3 and boto, into nice error messages.
        """
        last_traceback = traceback.format_exc()

        # to_native is trusted to handle exceptions that str() could
        # convert to text.
        try:
            except_msg = to_native(exception.message)
        except AttributeError:
            except_msg = to_native(exception)

        if msg is not None:
            message = '{0}: {1}'.format(msg, except_msg)
        else:
            message = except_msg

        try:
            response = exception.response
        except AttributeError:
            response = None

        failure = dict(
            msg=message,
            exception=last_traceback,
            **self._gather_versions()
        )

        if response is not None:
            failure.update(**camel_dict_to_snake_dict(response))

        self.fail_json(**failure)

    def _gather_versions(self):
        """Gather AWS SDK (boto3 and botocore) dependency versions

        Returns {'boto3_version': str, 'botocore_version': str}
        Returns {} if neither are installed
        """
        if not HAS_BOTO3:
            return {}
        import boto3
        import botocore
        return dict(boto3_version=boto3.__version__,
                    botocore_version=botocore.__version__)

    def boto3_at_least(self, desired):
        """Check if the available boto3 version is greater than or equal to a desired version.

        Usage:
            if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
                # conditionally fail on old boto3 versions if a specific feature is not supported
                module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
        """
        existing = self._gather_versions()
        return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)

    def botocore_at_least(self, desired):
        """Check if the available botocore version is greater than or equal to a desired version.

        Usage:
            if not module.botocore_at_least('1.2.3'):
                module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
            if not module.botocore_at_least('1.5.3'):
                module.warn('Botocore did not include waiters for Service X before 1.5.3. '
                            'To wait until Service X resources are fully available, update botocore.')
        """
        existing = self._gather_versions()
        return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
-
-
-class _RetryingBotoClientWrapper(object):
- __never_wait = (
- 'get_paginator', 'can_paginate',
- 'get_waiter', 'generate_presigned_url',
- )
-
- def __init__(self, client, retry):
- self.client = client
- self.retry = retry
-
- def _create_optional_retry_wrapper_function(self, unwrapped):
- retrying_wrapper = self.retry(unwrapped)
-
- @wraps(unwrapped)
- def deciding_wrapper(aws_retry=False, *args, **kwargs):
- if aws_retry:
- return retrying_wrapper(*args, **kwargs)
- else:
- return unwrapped(*args, **kwargs)
- return deciding_wrapper
-
- def __getattr__(self, name):
- unwrapped = getattr(self.client, name)
- if name in self.__never_wait:
- return unwrapped
- elif callable(unwrapped):
- wrapped = self._create_optional_retry_wrapper_function(unwrapped)
- setattr(self, name, wrapped)
- return wrapped
- else:
- return unwrapped
-
-
def is_boto3_error_code(code, e=None):
    """Check if the botocore exception is raised by a specific error code.

    Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match

    Example:
        try:
            ec2.describe_instances(InstanceIds=['potato'])
        except is_boto3_error_code('InvalidInstanceID.Malformed'):
            # handle the error for that code case
        except botocore.exceptions.ClientError as e:
            # handle the generic error case for all other codes
    """
    from botocore.exceptions import ClientError
    if e is None:
        # Default to the exception currently being handled.
        import sys
        e = sys.exc_info()[1]
    matches = isinstance(e, ClientError) and e.response['Error']['Code'] == code
    if matches:
        return ClientError
    # A fresh exception type that nothing ever raises, so the except clause
    # using it simply never fires.
    return type('NeverEverRaisedException', (Exception,), {})
-
-
def get_boto3_client_method_parameters(client, method_name, required=False):
    """Return the input parameter names of a boto3 client method.

    With ``required=True`` only the mandatory parameters are returned; a
    method with no input shape yields an empty list.
    """
    operation = client.meta.method_to_api_mapping.get(method_name)
    input_shape = client._service_model.operation_model(operation).input_shape
    if not input_shape:
        return []
    if required:
        return list(input_shape.required_members)
    return list(input_shape.members.keys())
diff --git a/test/support/integration/plugins/module_utils/aws/iam.py b/test/support/integration/plugins/module_utils/aws/iam.py
deleted file mode 100644
index f05999aa37..0000000000
--- a/test/support/integration/plugins/module_utils/aws/iam.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import traceback
-
-try:
- from botocore.exceptions import ClientError, NoCredentialsError
-except ImportError:
- pass # caught by HAS_BOTO3
-
-from ansible.module_utils._text import to_native
-
-
-def get_aws_account_id(module):
- """ Given AnsibleAWSModule instance, get the active AWS account ID
-
- get_account_id tries too find out the account that we are working
- on. It's not guaranteed that this will be easy so we try in
- several different ways. Giving either IAM or STS privilages to
- the account should be enough to permit this.
- """
- account_id = None
- try:
- sts_client = module.client('sts')
- account_id = sts_client.get_caller_identity().get('Account')
- # non-STS sessions may also get NoCredentialsError from this STS call, so
- # we must catch that too and try the IAM version
- except (ClientError, NoCredentialsError):
- try:
- iam_client = module.client('iam')
- account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
- except ClientError as e:
- if (e.response['Error']['Code'] == 'AccessDenied'):
- except_msg = to_native(e)
- # don't match on `arn:aws` because of China region `arn:aws-cn` and similar
- account_id = except_msg.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/").group(1)
- if account_id is None:
- module.fail_json_aws(e, msg="Could not get AWS account information")
- except Exception as e:
- module.fail_json(
- msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
- exception=traceback.format_exc()
- )
- if not account_id:
- module.fail_json(msg="Failed while determining AWS account ID. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.")
- return to_native(account_id)
diff --git a/test/support/integration/plugins/module_utils/aws/s3.py b/test/support/integration/plugins/module_utils/aws/s3.py
deleted file mode 100644
index 2185869d49..0000000000
--- a/test/support/integration/plugins/module_utils/aws/s3.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (c) 2018 Red Hat, Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # Handled by the calling module
-
-HAS_MD5 = True
-try:
- from hashlib import md5
-except ImportError:
- try:
- from md5 import md5
- except ImportError:
- HAS_MD5 = False
-
-
-def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
- if not HAS_MD5:
- return None
-
- if '-' in etag:
- # Multi-part ETag; a hash of the hashes of each part.
- parts = int(etag[1:-1].split('-')[1])
- digests = []
-
- s3_kwargs = dict(
- Bucket=bucket,
- Key=obj,
- )
- if version:
- s3_kwargs['VersionId'] = version
-
- with open(filename, 'rb') as f:
- for part_num in range(1, parts + 1):
- s3_kwargs['PartNumber'] = part_num
- try:
- head = s3.head_object(**s3_kwargs)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get head object")
- digests.append(md5(f.read(int(head['ContentLength']))))
-
- digest_squared = md5(b''.join(m.digest() for m in digests))
- return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
- else: # Compute the MD5 sum normally
- return '"{0}"'.format(module.md5(filename))
diff --git a/test/support/integration/plugins/module_utils/aws/waiters.py b/test/support/integration/plugins/module_utils/aws/waiters.py
deleted file mode 100644
index 25db598bcb..0000000000
--- a/test/support/integration/plugins/module_utils/aws/waiters.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-try:
- import botocore.waiter as core_waiter
-except ImportError:
- pass # caught by HAS_BOTO3
-
-
-ec2_data = {
- "version": 2,
- "waiters": {
- "InternetGatewayExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeInternetGateways",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(InternetGateways) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidInternetGatewayID.NotFound",
- "state": "retry"
- },
- ]
- },
- "RouteTableExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeRouteTables",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(RouteTables[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidRouteTableID.NotFound",
- "state": "retry"
- },
- ]
- },
- "SecurityGroupExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSecurityGroups",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(SecurityGroups[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidGroup.NotFound",
- "state": "retry"
- },
- ]
- },
- "SubnetExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(Subnets[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidSubnetID.NotFound",
- "state": "retry"
- },
- ]
- },
- "SubnetHasMapPublic": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "pathAll",
- "expected": True,
- "argument": "Subnets[].MapPublicIpOnLaunch",
- "state": "success"
- },
- ]
- },
- "SubnetNoMapPublic": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "pathAll",
- "expected": False,
- "argument": "Subnets[].MapPublicIpOnLaunch",
- "state": "success"
- },
- ]
- },
- "SubnetHasAssignIpv6": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "pathAll",
- "expected": True,
- "argument": "Subnets[].AssignIpv6AddressOnCreation",
- "state": "success"
- },
- ]
- },
- "SubnetNoAssignIpv6": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "pathAll",
- "expected": False,
- "argument": "Subnets[].AssignIpv6AddressOnCreation",
- "state": "success"
- },
- ]
- },
- "SubnetDeleted": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(Subnets[]) > `0`",
- "state": "retry"
- },
- {
- "matcher": "error",
- "expected": "InvalidSubnetID.NotFound",
- "state": "success"
- },
- ]
- },
- "VpnGatewayExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeVpnGateways",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(VpnGateways[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidVpnGatewayID.NotFound",
- "state": "retry"
- },
- ]
- },
- "VpnGatewayDetached": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeVpnGateways",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "VpnGateways[0].State == 'available'",
- "state": "success"
- },
- ]
- },
- }
-}
-
-
-waf_data = {
- "version": 2,
- "waiters": {
- "ChangeTokenInSync": {
- "delay": 20,
- "maxAttempts": 60,
- "operation": "GetChangeTokenStatus",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "ChangeTokenStatus == 'INSYNC'",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "WAFInternalErrorException",
- "state": "retry"
- }
- ]
- }
- }
-}
-
-eks_data = {
- "version": 2,
- "waiters": {
- "ClusterActive": {
- "delay": 20,
- "maxAttempts": 60,
- "operation": "DescribeCluster",
- "acceptors": [
- {
- "state": "success",
- "matcher": "path",
- "argument": "cluster.status",
- "expected": "ACTIVE"
- },
- {
- "state": "retry",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
- },
- "ClusterDeleted": {
- "delay": 20,
- "maxAttempts": 60,
- "operation": "DescribeCluster",
- "acceptors": [
- {
- "state": "retry",
- "matcher": "path",
- "argument": "cluster.status != 'DELETED'",
- "expected": True
- },
- {
- "state": "success",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
- }
- }
-}
-
-
-rds_data = {
- "version": 2,
- "waiters": {
- "DBInstanceStopped": {
- "delay": 20,
- "maxAttempts": 60,
- "operation": "DescribeDBInstances",
- "acceptors": [
- {
- "state": "success",
- "matcher": "pathAll",
- "argument": "DBInstances[].DBInstanceStatus",
- "expected": "stopped"
- },
- ]
- }
- }
-}
-
-
-def ec2_model(name):
- ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
- return ec2_models.get_waiter(name)
-
-
-def waf_model(name):
- waf_models = core_waiter.WaiterModel(waiter_config=waf_data)
- return waf_models.get_waiter(name)
-
-
-def eks_model(name):
- eks_models = core_waiter.WaiterModel(waiter_config=eks_data)
- return eks_models.get_waiter(name)
-
-
-def rds_model(name):
- rds_models = core_waiter.WaiterModel(waiter_config=rds_data)
- return rds_models.get_waiter(name)
-
-
-waiters_by_name = {
- ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
- 'internet_gateway_exists',
- ec2_model('InternetGatewayExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_internet_gateways
- )),
- ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
- 'route_table_exists',
- ec2_model('RouteTableExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_route_tables
- )),
- ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
- 'security_group_exists',
- ec2_model('SecurityGroupExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_security_groups
- )),
- ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
- 'subnet_exists',
- ec2_model('SubnetExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
- 'subnet_has_map_public',
- ec2_model('SubnetHasMapPublic'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
- 'subnet_no_map_public',
- ec2_model('SubnetNoMapPublic'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
- 'subnet_has_assign_ipv6',
- ec2_model('SubnetHasAssignIpv6'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
- 'subnet_no_assign_ipv6',
- ec2_model('SubnetNoAssignIpv6'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
- 'subnet_deleted',
- ec2_model('SubnetDeleted'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
- 'vpn_gateway_exists',
- ec2_model('VpnGatewayExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_vpn_gateways
- )),
- ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
- 'vpn_gateway_detached',
- ec2_model('VpnGatewayDetached'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_vpn_gateways
- )),
- ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
- 'change_token_in_sync',
- waf_model('ChangeTokenInSync'),
- core_waiter.NormalizedOperationMethod(
- waf.get_change_token_status
- )),
- ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
- 'change_token_in_sync',
- waf_model('ChangeTokenInSync'),
- core_waiter.NormalizedOperationMethod(
- waf.get_change_token_status
- )),
- ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
- 'cluster_active',
- eks_model('ClusterActive'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_cluster
- )),
- ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
- 'cluster_deleted',
- eks_model('ClusterDeleted'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_cluster
- )),
- ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
- 'db_instance_stopped',
- rds_model('DBInstanceStopped'),
- core_waiter.NormalizedOperationMethod(
- rds.describe_db_instances
- )),
-}
-
-
-def get_waiter(client, waiter_name):
- try:
- return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
- except KeyError:
- raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
- waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
diff --git a/test/support/integration/plugins/module_utils/cloud.py b/test/support/integration/plugins/module_utils/cloud.py
deleted file mode 100644
index 0d29071fe1..0000000000
--- a/test/support/integration/plugins/module_utils/cloud.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#
-# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-"""
-This module adds shared support for generic cloud modules
-
-In order to use this module, include it as part of a custom
-module as shown below.
-
-from ansible.module_utils.cloud import CloudRetry
-
-The 'cloud' module provides the following common classes:
-
- * CloudRetry
- - The base class to be used by other cloud providers, in order to
- provide a backoff/retry decorator based on status codes.
-
- - Example using the AWSRetry class which inherits from CloudRetry.
-
- @AWSRetry.exponential_backoff(retries=10, delay=3)
- get_ec2_security_group_ids_from_names()
-
- @AWSRetry.jittered_backoff()
- get_ec2_security_group_ids_from_names()
-
-"""
-import random
-from functools import wraps
-import syslog
-import time
-
-
-def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
- """ Customizable exponential backoff strategy.
- Args:
- retries (int): Maximum number of times to retry a request.
- delay (float): Initial (base) delay.
- backoff (float): base of the exponent to use for exponential
- backoff.
- max_delay (int): Optional. If provided each delay generated is capped
- at this amount. Defaults to 60 seconds.
- Returns:
- Callable that returns a generator. This generator yields durations in
- seconds to be used as delays for an exponential backoff strategy.
- Usage:
- >>> backoff = _exponential_backoff()
- >>> backoff
- <function backoff_backoff at 0x7f0d939facf8>
- >>> list(backoff())
- [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
- """
- def backoff_gen():
- for retry in range(0, retries):
- sleep = delay * backoff ** retry
- yield sleep if max_delay is None else min(sleep, max_delay)
- return backoff_gen
-
-
-def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
- """ Implements the "Full Jitter" backoff strategy described here
- https://www.awsarchitectureblog.com/2015/03/backoff.html
- Args:
- retries (int): Maximum number of times to retry a request.
- delay (float): Approximate number of seconds to sleep for the first
- retry.
- max_delay (int): The maximum number of seconds to sleep for any retry.
- _random (random.Random or None): Makes this generator testable by
- allowing developers to explicitly pass in the a seeded Random.
- Returns:
- Callable that returns a generator. This generator yields durations in
- seconds to be used as delays for a full jitter backoff strategy.
- Usage:
- >>> backoff = _full_jitter_backoff(retries=5)
- >>> backoff
- <function backoff_backoff at 0x7f0d939facf8>
- >>> list(backoff())
- [3, 6, 5, 23, 38]
- >>> list(backoff())
- [2, 1, 6, 6, 31]
- """
- def backoff_gen():
- for retry in range(0, retries):
- yield _random.randint(0, min(max_delay, delay * 2 ** retry))
- return backoff_gen
-
-
-class CloudRetry(object):
- """ CloudRetry can be used by any cloud provider, in order to implement a
- backoff algorithm/retry effect based on Status Code from Exceptions.
- """
- # This is the base class of the exception.
- # AWS Example botocore.exceptions.ClientError
- base_class = None
-
- @staticmethod
- def status_code_from_exception(error):
- """ Return the status code from the exception object
- Args:
- error (object): The exception itself.
- """
- pass
-
- @staticmethod
- def found(response_code, catch_extra_error_codes=None):
- """ Return True if the Response Code to retry on was found.
- Args:
- response_code (str): This is the Response Code that is being matched against.
- """
- pass
-
- @classmethod
- def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
- """ Retry calling the Cloud decorated function using the provided
- backoff strategy.
- Args:
- backoff_strategy (callable): Callable that returns a generator. The
- generator should yield sleep times for each retry of the decorated
- function.
- """
- def deco(f):
- @wraps(f)
- def retry_func(*args, **kwargs):
- for delay in backoff_strategy():
- try:
- return f(*args, **kwargs)
- except Exception as e:
- if isinstance(e, cls.base_class):
- response_code = cls.status_code_from_exception(e)
- if cls.found(response_code, catch_extra_error_codes):
- msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
- syslog.syslog(syslog.LOG_INFO, msg)
- time.sleep(delay)
- else:
- # Return original exception if exception is not a ClientError
- raise e
- else:
- # Return original exception if exception is not a ClientError
- raise e
- return f(*args, **kwargs)
-
- return retry_func # true decorator
-
- return deco
-
- @classmethod
- def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
- """
- Retry calling the Cloud decorated function using an exponential backoff.
-
- Kwargs:
- retries (int): Number of times to retry a failed request before giving up
- default=10
- delay (int or float): Initial delay between retries in seconds
- default=3
- backoff (int or float): backoff multiplier e.g. value of 2 will
- double the delay each retry
- default=1.1
- max_delay (int or None): maximum amount of time to wait between retries.
- default=60
- """
- return cls._backoff(_exponential_backoff(
- retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
-
- @classmethod
- def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
- """
- Retry calling the Cloud decorated function using a jittered backoff
- strategy. More on this strategy here:
-
- https://www.awsarchitectureblog.com/2015/03/backoff.html
-
- Kwargs:
- retries (int): Number of times to retry a failed request before giving up
- default=10
- delay (int): Initial delay between retries in seconds
- default=3
- max_delay (int): maximum amount of time to wait between retries.
- default=60
- """
- return cls._backoff(_full_jitter_backoff(
- retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
-
- @classmethod
- def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
- """
- Retry calling the Cloud decorated function using an exponential backoff.
-
- Compatibility for the original implementation of CloudRetry.backoff that
- did not provide configurable backoff strategies. Developers should use
- CloudRetry.exponential_backoff instead.
-
- Kwargs:
- tries (int): Number of times to try (not retry) before giving up
- default=10
- delay (int or float): Initial delay between retries in seconds
- default=3
- backoff (int or float): backoff multiplier e.g. value of 2 will
- double the delay each retry
- default=1.1
- """
- return cls.exponential_backoff(
- retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
diff --git a/test/support/integration/plugins/module_utils/ec2.py b/test/support/integration/plugins/module_utils/ec2.py
deleted file mode 100644
index 0d28108dd5..0000000000
--- a/test/support/integration/plugins/module_utils/ec2.py
+++ /dev/null
@@ -1,758 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-import re
-import sys
-import traceback
-
-from ansible.module_utils.ansible_release import __version__
-from ansible.module_utils.basic import missing_required_lib, env_fallback
-from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.cloud import CloudRetry
-from ansible.module_utils.six import string_types, binary_type, text_type
-from ansible.module_utils.common.dict_transformations import (
- camel_dict_to_snake_dict, snake_dict_to_camel_dict,
- _camel_to_snake, _snake_to_camel,
-)
-
-BOTO_IMP_ERR = None
-try:
- import boto
- import boto.ec2 # boto does weird import stuff
- HAS_BOTO = True
-except ImportError:
- BOTO_IMP_ERR = traceback.format_exc()
- HAS_BOTO = False
-
-BOTO3_IMP_ERR = None
-try:
- import boto3
- import botocore
- HAS_BOTO3 = True
-except Exception:
- BOTO3_IMP_ERR = traceback.format_exc()
- HAS_BOTO3 = False
-
-try:
- # Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also
- # uses this (and it works as expected). Python 2.6 will trigger the ImportError.
- from functools import cmp_to_key
- PY3_COMPARISON = True
-except ImportError:
- PY3_COMPARISON = False
-
-
-class AnsibleAWSError(Exception):
- pass
-
-
-def _botocore_exception_maybe():
- """
- Allow for boto3 not being installed when using these utils by wrapping
- botocore.exceptions instead of assigning from it directly.
- """
- if HAS_BOTO3:
- return botocore.exceptions.ClientError
- return type(None)
-
-
-class AWSRetry(CloudRetry):
- base_class = _botocore_exception_maybe()
-
- @staticmethod
- def status_code_from_exception(error):
- return error.response['Error']['Code']
-
- @staticmethod
- def found(response_code, catch_extra_error_codes=None):
- # This list of failures is based on this API Reference
- # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
- #
- # TooManyRequestsException comes from inside botocore when it
- # does retrys, unfortunately however it does not try long
- # enough to allow some services such as API Gateway to
- # complete configuration. At the moment of writing there is a
- # botocore/boto3 bug open to fix this.
- #
- # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
- retry_on = [
- 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
- 'InternalFailure', 'InternalError', 'TooManyRequestsException',
- 'Throttling'
- ]
- if catch_extra_error_codes:
- retry_on.extend(catch_extra_error_codes)
-
- return response_code in retry_on
-
-
-def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
- try:
- return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
- except ValueError as e:
- module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
- botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
- module.fail_json(msg=to_native(e))
- except botocore.exceptions.NoRegionError as e:
- module.fail_json(msg="The %s module requires a region and none was found in configuration, "
- "environment variables or module parameters" % module._name)
-
-
-def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
- profile = params.pop('profile_name', None)
-
- if conn_type not in ['both', 'resource', 'client']:
- raise ValueError('There is an issue in the calling code. You '
- 'must specify either both, resource, or client to '
- 'the conn_type parameter in the boto3_conn function '
- 'call')
-
- config = botocore.config.Config(
- user_agent_extra='Ansible/{0}'.format(__version__),
- )
-
- if params.get('config') is not None:
- config = config.merge(params.pop('config'))
- if params.get('aws_config') is not None:
- config = config.merge(params.pop('aws_config'))
-
- session = boto3.session.Session(
- profile_name=profile,
- )
-
- if conn_type == 'resource':
- return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
- elif conn_type == 'client':
- return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
- else:
- client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
- resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
- return client, resource
-
-
-boto3_inventory_conn = _boto3_conn
-
-
-def boto_exception(err):
- """
- Extracts the error message from a boto exception.
-
- :param err: Exception from boto
- :return: Error message
- """
- if hasattr(err, 'error_message'):
- error = err.error_message
- elif hasattr(err, 'message'):
- error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
- else:
- error = '%s: %s' % (Exception, err)
-
- return error
-
-
-def aws_common_argument_spec():
- return dict(
- debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
- ec2_url=dict(),
- aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
- aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
- validate_certs=dict(default=True, type='bool'),
- security_token=dict(aliases=['access_token'], no_log=True),
- profile=dict(),
- aws_config=dict(type='dict'),
- )
-
-
-def ec2_argument_spec():
- spec = aws_common_argument_spec()
- spec.update(
- dict(
- region=dict(aliases=['aws_region', 'ec2_region']),
- )
- )
- return spec
-
-
-def get_aws_region(module, boto3=False):
- region = module.params.get('region')
-
- if region:
- return region
-
- if 'AWS_REGION' in os.environ:
- return os.environ['AWS_REGION']
- if 'AWS_DEFAULT_REGION' in os.environ:
- return os.environ['AWS_DEFAULT_REGION']
- if 'EC2_REGION' in os.environ:
- return os.environ['EC2_REGION']
-
- if not boto3:
- if not HAS_BOTO:
- module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
- # boto.config.get returns None if config not found
- region = boto.config.get('Boto', 'aws_region')
- if region:
- return region
- return boto.config.get('Boto', 'ec2_region')
-
- if not HAS_BOTO3:
- module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
-
- # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
- try:
- profile_name = module.params.get('profile')
- return botocore.session.Session(profile=profile_name).get_config_variable('region')
- except botocore.exceptions.ProfileNotFound as e:
- return None
-
-
-def get_aws_connection_info(module, boto3=False):
-
- # Check module args for credentials, then check environment vars
- # access_key
-
- ec2_url = module.params.get('ec2_url')
- access_key = module.params.get('aws_access_key')
- secret_key = module.params.get('aws_secret_key')
- security_token = module.params.get('security_token')
- region = get_aws_region(module, boto3)
- profile_name = module.params.get('profile')
- validate_certs = module.params.get('validate_certs')
- config = module.params.get('aws_config')
-
- if not ec2_url:
- if 'AWS_URL' in os.environ:
- ec2_url = os.environ['AWS_URL']
- elif 'EC2_URL' in os.environ:
- ec2_url = os.environ['EC2_URL']
-
- if not access_key:
- if os.environ.get('AWS_ACCESS_KEY_ID'):
- access_key = os.environ['AWS_ACCESS_KEY_ID']
- elif os.environ.get('AWS_ACCESS_KEY'):
- access_key = os.environ['AWS_ACCESS_KEY']
- elif os.environ.get('EC2_ACCESS_KEY'):
- access_key = os.environ['EC2_ACCESS_KEY']
- elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
- access_key = boto.config.get('Credentials', 'aws_access_key_id')
- elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
- access_key = boto.config.get('default', 'aws_access_key_id')
- else:
- # in case access_key came in as empty string
- access_key = None
-
- if not secret_key:
- if os.environ.get('AWS_SECRET_ACCESS_KEY'):
- secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
- elif os.environ.get('AWS_SECRET_KEY'):
- secret_key = os.environ['AWS_SECRET_KEY']
- elif os.environ.get('EC2_SECRET_KEY'):
- secret_key = os.environ['EC2_SECRET_KEY']
- elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
- secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
- elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
- secret_key = boto.config.get('default', 'aws_secret_access_key')
- else:
- # in case secret_key came in as empty string
- secret_key = None
-
- if not security_token:
- if os.environ.get('AWS_SECURITY_TOKEN'):
- security_token = os.environ['AWS_SECURITY_TOKEN']
- elif os.environ.get('AWS_SESSION_TOKEN'):
- security_token = os.environ['AWS_SESSION_TOKEN']
- elif os.environ.get('EC2_SECURITY_TOKEN'):
- security_token = os.environ['EC2_SECURITY_TOKEN']
- elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
- security_token = boto.config.get('Credentials', 'aws_security_token')
- elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
- security_token = boto.config.get('default', 'aws_security_token')
- else:
- # in case secret_token came in as empty string
- security_token = None
-
- if HAS_BOTO3 and boto3:
- boto_params = dict(aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- aws_session_token=security_token)
- boto_params['verify'] = validate_certs
-
- if profile_name:
- boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
- boto_params['profile_name'] = profile_name
-
- else:
- boto_params = dict(aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- security_token=security_token)
-
- # only set profile_name if passed as an argument
- if profile_name:
- boto_params['profile_name'] = profile_name
-
- boto_params['validate_certs'] = validate_certs
-
- if config is not None:
- if HAS_BOTO3 and boto3:
- boto_params['aws_config'] = botocore.config.Config(**config)
- elif HAS_BOTO and not boto3:
- if 'user_agent' in config:
- sys.modules["boto.connection"].UserAgent = config['user_agent']
-
- for param, value in boto_params.items():
- if isinstance(value, binary_type):
- boto_params[param] = text_type(value, 'utf-8', 'strict')
-
- return region, ec2_url, boto_params
-
-
-def get_ec2_creds(module):
- ''' for compatibility mode with old modules that don't/can't yet
- use ec2_connect method '''
- region, ec2_url, boto_params = get_aws_connection_info(module)
- return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
-
-
-def boto_fix_security_token_in_profile(conn, profile_name):
- ''' monkey patch for boto issue boto/boto#2100 '''
- profile = 'profile ' + profile_name
- if boto.config.has_option(profile, 'aws_security_token'):
- conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
- return conn
-
-
-def connect_to_aws(aws_module, region, **params):
- try:
- conn = aws_module.connect_to_region(region, **params)
- except(boto.provider.ProfileNotFoundError):
- raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
- if not conn:
- if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
- raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
- "boto or extend with endpoints_path" % (region, aws_module.__name__))
- else:
- raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
- if params.get('profile_name'):
- conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
- return conn
-
-
-def ec2_connect(module):
-
- """ Return an ec2 connection"""
-
- region, ec2_url, boto_params = get_aws_connection_info(module)
-
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- ec2 = connect_to_aws(boto.ec2, region, **boto_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
- module.fail_json(msg=str(e))
- # Otherwise, no region so we fallback to the old connection method
- elif ec2_url:
- try:
- ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
- module.fail_json(msg=str(e))
- else:
- module.fail_json(msg="Either region or ec2_url must be specified")
-
- return ec2
-
-
-def ansible_dict_to_boto3_filter_list(filters_dict):
-
- """ Convert an Ansible dict of filters to list of dicts that boto3 can use
- Args:
- filters_dict (dict): Dict of AWS filters.
- Basic Usage:
- >>> filters = {'some-aws-id': 'i-01234567'}
- >>> ansible_dict_to_boto3_filter_list(filters)
- {
- 'some-aws-id': 'i-01234567'
- }
- Returns:
- List: List of AWS filters and their values
- [
- {
- 'Name': 'some-aws-id',
- 'Values': [
- 'i-01234567',
- ]
- }
- ]
- """
-
- filters_list = []
- for k, v in filters_dict.items():
- filter_dict = {'Name': k}
- if isinstance(v, string_types):
- filter_dict['Values'] = [v]
- else:
- filter_dict['Values'] = v
-
- filters_list.append(filter_dict)
-
- return filters_list
-
-
-def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
-
- """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
- Args:
- tags_list (list): List of dicts representing AWS tags.
- tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
- tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
- Basic Usage:
- >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
- >>> boto3_tag_list_to_ansible_dict(tags_list)
- [
- {
- 'Key': 'MyTagKey',
- 'Value': 'MyTagValue'
- }
- ]
- Returns:
- Dict: Dict of key:value pairs representing AWS tags
- {
- 'MyTagKey': 'MyTagValue',
- }
- """
-
- if tag_name_key_name and tag_value_key_name:
- tag_candidates = {tag_name_key_name: tag_value_key_name}
- else:
- tag_candidates = {'key': 'value', 'Key': 'Value'}
-
- if not tags_list:
- return {}
- for k, v in tag_candidates.items():
- if k in tags_list[0] and v in tags_list[0]:
- return dict((tag[k], tag[v]) for tag in tags_list)
- raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
-
-
-def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
-
- """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
- Args:
- tags_dict (dict): Dict representing AWS resource tags.
- tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
- tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
- Basic Usage:
- >>> tags_dict = {'MyTagKey': 'MyTagValue'}
- >>> ansible_dict_to_boto3_tag_list(tags_dict)
- {
- 'MyTagKey': 'MyTagValue'
- }
- Returns:
- List: List of dicts containing tag keys and values
- [
- {
- 'Key': 'MyTagKey',
- 'Value': 'MyTagValue'
- }
- ]
- """
-
- tags_list = []
- for k, v in tags_dict.items():
- tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
-
- return tags_list
-
-
-def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
-
- """ Return list of security group IDs from security group names. Note that security group names are not unique
- across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
- will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
- a try block
- """
-
- def get_sg_name(sg, boto3):
-
- if boto3:
- return sg['GroupName']
- else:
- return sg.name
-
- def get_sg_id(sg, boto3):
-
- if boto3:
- return sg['GroupId']
- else:
- return sg.id
-
- sec_group_id_list = []
-
- if isinstance(sec_group_list, string_types):
- sec_group_list = [sec_group_list]
-
- # Get all security groups
- if boto3:
- if vpc_id:
- filters = [
- {
- 'Name': 'vpc-id',
- 'Values': [
- vpc_id,
- ]
- }
- ]
- all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
- else:
- all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
- else:
- if vpc_id:
- filters = {'vpc-id': vpc_id}
- all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
- else:
- all_sec_groups = ec2_connection.get_all_security_groups()
-
- unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
- sec_group_name_list = list(set(sec_group_list) - set(unmatched))
-
- if len(unmatched) > 0:
- # If we have unmatched names that look like an ID, assume they are
- import re
- sec_group_id_list = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
- still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
- if len(still_unmatched) > 0:
- raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
-
- sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
-
- return sec_group_id_list
-
-
-def _hashable_policy(policy, policy_list):
- """
- Takes a policy and returns a list, the contents of which are all hashable and sorted.
- Example input policy:
- {'Version': '2012-10-17',
- 'Statement': [{'Action': 's3:PutObjectAcl',
- 'Sid': 'AddCannedAcl2',
- 'Resource': 'arn:aws:s3:::test_policy/*',
- 'Effect': 'Allow',
- 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
- }]}
- Returned value:
- [('Statement', ((('Action', (u's3:PutObjectAcl',)),
- ('Effect', (u'Allow',)),
- ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
- ('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
- ('Version', (u'2012-10-17',)))]
-
- """
- # Amazon will automatically convert bool and int to strings for us
- if isinstance(policy, bool):
- return tuple([str(policy).lower()])
- elif isinstance(policy, int):
- return tuple([str(policy)])
-
- if isinstance(policy, list):
- for each in policy:
- tupleified = _hashable_policy(each, [])
- if isinstance(tupleified, list):
- tupleified = tuple(tupleified)
- policy_list.append(tupleified)
- elif isinstance(policy, string_types) or isinstance(policy, binary_type):
- policy = to_text(policy)
- # convert root account ARNs to just account IDs
- if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
- policy = policy.split(':')[4]
- return [policy]
- elif isinstance(policy, dict):
- sorted_keys = list(policy.keys())
- sorted_keys.sort()
- for key in sorted_keys:
- tupleified = _hashable_policy(policy[key], [])
- if isinstance(tupleified, list):
- tupleified = tuple(tupleified)
- policy_list.append((key, tupleified))
-
- # ensure we aren't returning deeply nested structures of length 1
- if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
- policy_list = policy_list[0]
- if isinstance(policy_list, list):
- if PY3_COMPARISON:
- policy_list.sort(key=cmp_to_key(py3cmp))
- else:
- policy_list.sort()
- return policy_list
-
-
-def py3cmp(a, b):
- """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
- try:
- if a > b:
- return 1
- elif a < b:
- return -1
- else:
- return 0
- except TypeError as e:
- # check to see if they're tuple-string
- # always say strings are less than tuples (to maintain compatibility with python2)
- str_ind = to_text(e).find('str')
- tup_ind = to_text(e).find('tuple')
- if -1 not in (str_ind, tup_ind):
- if str_ind < tup_ind:
- return -1
- elif tup_ind < str_ind:
- return 1
- raise
-
-
-def compare_policies(current_policy, new_policy):
- """ Compares the existing policy and the updated policy
- Returns True if there is a difference between policies.
- """
- return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
-
-
-def sort_json_policy_dict(policy_dict):
-
- """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
- different orders will return true
- Args:
- policy_dict (dict): Dict representing IAM JSON policy.
- Basic Usage:
- >>> my_iam_policy = {'Principle': {'AWS':["31","7","14","101"]}
- >>> sort_json_policy_dict(my_iam_policy)
- Returns:
- Dict: Will return a copy of the policy as a Dict but any List will be sorted
- {
- 'Principle': {
- 'AWS': [ '7', '14', '31', '101' ]
- }
- }
- """
-
- def value_is_list(my_list):
-
- checked_list = []
- for item in my_list:
- if isinstance(item, dict):
- checked_list.append(sort_json_policy_dict(item))
- elif isinstance(item, list):
- checked_list.append(value_is_list(item))
- else:
- checked_list.append(item)
-
- # Sort list. If it's a list of dictionaries, sort by tuple of key-value
- # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
- checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
- return checked_list
-
- ordered_policy_dict = {}
- for key, value in policy_dict.items():
- if isinstance(value, dict):
- ordered_policy_dict[key] = sort_json_policy_dict(value)
- elif isinstance(value, list):
- ordered_policy_dict[key] = value_is_list(value)
- else:
- ordered_policy_dict[key] = value
-
- return ordered_policy_dict
-
-
-def map_complex_type(complex_type, type_map):
- """
- Allows to cast elements within a dictionary to a specific type
- Example of usage:
-
- DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
- 'maximum_percent': 'int',
- 'minimum_healthy_percent': 'int'
- }
-
- deployment_configuration = map_complex_type(module.params['deployment_configuration'],
- DEPLOYMENT_CONFIGURATION_TYPE_MAP)
-
- This ensures all keys within the root element are casted and valid integers
- """
-
- if complex_type is None:
- return
- new_type = type(complex_type)()
- if isinstance(complex_type, dict):
- for key in complex_type:
- if key in type_map:
- if isinstance(type_map[key], list):
- new_type[key] = map_complex_type(
- complex_type[key],
- type_map[key][0])
- else:
- new_type[key] = map_complex_type(
- complex_type[key],
- type_map[key])
- else:
- return complex_type
- elif isinstance(complex_type, list):
- for i in range(len(complex_type)):
- new_type.append(map_complex_type(
- complex_type[i],
- type_map))
- elif type_map:
- return globals()['__builtins__'][type_map](complex_type)
- return new_type
-
-
-def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
- """
- Compare two dicts of AWS tags. Dicts are expected to of been created using 'boto3_tag_list_to_ansible_dict' helper function.
- Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ
- these may not be able to be used out of the box.
-
- :param current_tags_dict:
- :param new_tags_dict:
- :param purge_tags:
- :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
- :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
- """
-
- tag_key_value_pairs_to_set = {}
- tag_keys_to_unset = []
-
- for key in current_tags_dict.keys():
- if key not in new_tags_dict and purge_tags:
- tag_keys_to_unset.append(key)
-
- for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
- if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
- tag_key_value_pairs_to_set[key] = new_tags_dict[key]
-
- return tag_key_value_pairs_to_set, tag_keys_to_unset
diff --git a/test/support/integration/plugins/modules/ec2.py b/test/support/integration/plugins/modules/ec2.py
deleted file mode 100644
index 1e97effd9f..0000000000
--- a/test/support/integration/plugins/modules/ec2.py
+++ /dev/null
@@ -1,1766 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2
-short_description: create, terminate, start or stop an instance in ec2
-description:
- - Creates or terminates ec2 instances.
- - >
- Note: This module uses the older boto Python module to interact with the EC2 API.
- M(ec2) will still receive bug fixes, but no new features.
- Consider using the M(ec2_instance) module instead.
- If M(ec2_instance) does not support a feature you need that is available in M(ec2), please
- file a feature request.
-version_added: "0.9"
-options:
- key_name:
- description:
- - Key pair to use on the instance.
- - The SSH key must already exist in AWS in order to use this argument.
- - Keys can be created / deleted using the M(ec2_key) module.
- aliases: ['keypair']
- type: str
- id:
- version_added: "1.1"
- description:
- - Identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
- - This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
- - For details, see the description of client token at U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
- type: str
- group:
- description:
- - Security group (or list of groups) to use with the instance.
- aliases: [ 'groups' ]
- type: list
- elements: str
- group_id:
- version_added: "1.1"
- description:
- - Security group id (or list of ids) to use with the instance.
- type: list
- elements: str
- zone:
- version_added: "1.2"
- description:
- - AWS availability zone in which to launch the instance.
- aliases: [ 'aws_zone', 'ec2_zone' ]
- type: str
- instance_type:
- description:
- - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
- - Required when creating a new instance.
- type: str
- aliases: ['type']
- tenancy:
- version_added: "1.9"
- description:
- - An instance with a tenancy of C(dedicated) runs on single-tenant hardware and can only be launched into a VPC.
- - Note that to use dedicated tenancy you MUST specify a I(vpc_subnet_id) as well.
- - Dedicated tenancy is not available for EC2 "micro" instances.
- default: default
- choices: [ "default", "dedicated" ]
- type: str
- spot_price:
- version_added: "1.5"
- description:
- - Maximum spot price to bid. If not set, a regular on-demand instance is requested.
- - A spot request is made with this maximum bid. When it is filled, the instance is started.
- type: str
- spot_type:
- version_added: "2.0"
- description:
- - The type of spot request.
- - After being interrupted a C(persistent) spot instance will be started once there is capacity to fill the request again.
- default: "one-time"
- choices: [ "one-time", "persistent" ]
- type: str
- image:
- description:
- - I(ami) ID to use for the instance.
- - Required when I(state=present).
- type: str
- kernel:
- description:
- - Kernel eki to use for the instance.
- type: str
- ramdisk:
- description:
- - Ramdisk eri to use for the instance.
- type: str
- wait:
- description:
- - Wait for the instance to reach its desired state before returning.
- - Does not wait for SSH, see the 'wait_for_connection' example for details.
- type: bool
- default: false
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- default: 300
- type: int
- spot_wait_timeout:
- version_added: "1.5"
- description:
- - How long to wait for the spot instance request to be fulfilled. Affects 'Request valid until' for setting spot request lifespan.
- default: 600
- type: int
- count:
- description:
- - Number of instances to launch.
- default: 1
- type: int
- monitoring:
- version_added: "1.1"
- description:
- - Enable detailed monitoring (CloudWatch) for instance.
- type: bool
- default: false
- user_data:
- version_added: "0.9"
- description:
- - Opaque blob of data which is made available to the EC2 instance.
- type: str
- instance_tags:
- version_added: "1.0"
- description:
- - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'.
- type: dict
- placement_group:
- version_added: "1.3"
- description:
- - Placement group for the instance when using EC2 Clustered Compute.
- type: str
- vpc_subnet_id:
- version_added: "1.1"
- description:
- - the subnet ID in which to launch the instance (VPC).
- type: str
- assign_public_ip:
- version_added: "1.5"
- description:
- - When provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+.
- type: bool
- private_ip:
- version_added: "1.2"
- description:
- - The private ip address to assign the instance (from the vpc subnet).
- type: str
- instance_profile_name:
- version_added: "1.3"
- description:
- - Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+.
- type: str
- instance_ids:
- version_added: "1.3"
- description:
- - "list of instance ids, currently used for states: absent, running, stopped"
- aliases: ['instance_id']
- type: list
- elements: str
- source_dest_check:
- version_added: "1.6"
- description:
- - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers).
- When initially creating an instance the EC2 API defaults this to C(True).
- type: bool
- termination_protection:
- version_added: "2.0"
- description:
- - Enable or Disable the Termination Protection.
- type: bool
- default: false
- instance_initiated_shutdown_behavior:
- version_added: "2.2"
- description:
- - Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store.
- images (which require termination on shutdown).
- default: 'stop'
- choices: [ "stop", "terminate" ]
- type: str
- state:
- version_added: "1.3"
- description:
- - Create, terminate, start, stop or restart instances. The state 'restarted' was added in Ansible 2.2.
- - When I(state=absent), I(instance_ids) is required.
- - When I(state=running), I(state=stopped) or I(state=restarted) then either I(instance_ids) or I(instance_tags) is required.
- default: 'present'
- choices: ['absent', 'present', 'restarted', 'running', 'stopped']
- type: str
- volumes:
- version_added: "1.5"
- description:
- - A list of hash/dictionaries of volumes to add to the new instance.
- type: list
- elements: dict
- suboptions:
- device_name:
- type: str
- required: true
- description:
- - A name for the device (For example C(/dev/sda)).
- delete_on_termination:
- type: bool
- default: false
- description:
- - Whether the volume should be automatically deleted when the instance is terminated.
- ephemeral:
- type: str
- description:
- - Whether the volume should be ephemeral.
- - Data on ephemeral volumes is lost when the instance is stopped.
- - Mutually exclusive with the I(snapshot) parameter.
- encrypted:
- type: bool
- default: false
- description:
- - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
- snapshot:
- type: str
- description:
- - The ID of an EBS snapshot to copy when creating the volume.
- - Mutually exclusive with the I(ephemeral) parameter.
- volume_type:
- type: str
- description:
- - The type of volume to create.
- - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
- volume_size:
- type: int
- description:
- - The size of the volume (in GiB).
- iops:
- type: int
- description:
- - The number of IOPS per second to provision for the volume.
- - Required when I(volume_type=io1).
- ebs_optimized:
- version_added: "1.6"
- description:
- - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
- default: false
- type: bool
- exact_count:
- version_added: "1.5"
- description:
- - An integer value which indicates how many instances that match the 'count_tag' parameter should be running.
- Instances are either created or terminated based on this value.
- type: int
- count_tag:
- version_added: "1.5"
- description:
- - Used with I(exact_count) to determine how many nodes based on a specific tag criteria should be running.
- This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers
- that are tagged with "class=webserver". The specified tag must already exist or be passed in as the I(instance_tags) option.
- type: raw
- network_interfaces:
- version_added: "2.0"
- description:
- - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
- none of the I(assign_public_ip), I(private_ip), I(vpc_subnet_id), I(group), or I(group_id) parameters may be used. (Those parameters are
- for creating a new network interface at launch.)
- aliases: ['network_interface']
- type: list
- elements: str
- spot_launch_group:
- version_added: "2.1"
- description:
- - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group).
- type: str
-author:
- - "Tim Gerla (@tgerla)"
- - "Lester Wade (@lwade)"
- - "Seth Vidal (@skvidal)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Basic provisioning example
-- ec2:
- key_name: mykey
- instance_type: t2.micro
- image: ami-123456
- wait: yes
- group: webserver
- count: 3
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Advanced example with tagging and CloudWatch
-- ec2:
- key_name: mykey
- group: databases
- instance_type: t2.micro
- image: ami-123456
- wait: yes
- wait_timeout: 500
- count: 5
- instance_tags:
- db: postgres
- monitoring: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Single instance with additional IOPS volume from snapshot and volume delete on termination
-- ec2:
- key_name: mykey
- group: webserver
- instance_type: c3.medium
- image: ami-123456
- wait: yes
- wait_timeout: 500
- volumes:
- - device_name: /dev/sdb
- snapshot: snap-abcdef12
- volume_type: io1
- iops: 1000
- volume_size: 100
- delete_on_termination: true
- monitoring: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Single instance with ssd gp2 root volume
-- ec2:
- key_name: mykey
- group: webserver
- instance_type: c3.medium
- image: ami-123456
- wait: yes
- wait_timeout: 500
- volumes:
- - device_name: /dev/xvda
- volume_type: gp2
- volume_size: 8
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
- count_tag:
- Name: dbserver
- exact_count: 1
-
-# Multiple groups example
-- ec2:
- key_name: mykey
- group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
- instance_type: m1.large
- image: ami-6e649707
- wait: yes
- wait_timeout: 500
- count: 5
- instance_tags:
- db: postgres
- monitoring: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Multiple instances with additional volume from snapshot
-- ec2:
- key_name: mykey
- group: webserver
- instance_type: m1.large
- image: ami-6e649707
- wait: yes
- wait_timeout: 500
- count: 5
- volumes:
- - device_name: /dev/sdb
- snapshot: snap-abcdef12
- volume_size: 10
- monitoring: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Dedicated tenancy example
-- local_action:
- module: ec2
- assign_public_ip: yes
- group_id: sg-1dc53f72
- key_name: mykey
- image: ami-6e649707
- instance_type: m1.small
- tenancy: dedicated
- vpc_subnet_id: subnet-29e63245
- wait: yes
-
-# Spot instance example
-- ec2:
- spot_price: 0.24
- spot_wait_timeout: 600
- keypair: mykey
- group_id: sg-1dc53f72
- instance_type: m1.small
- image: ami-6e649707
- wait: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
- spot_launch_group: report_generators
- instance_initiated_shutdown_behavior: terminate
-
-# Examples using pre-existing network interfaces
-- ec2:
- key_name: mykey
- instance_type: t2.small
- image: ami-f005ba11
- network_interface: eni-deadbeef
-
-- ec2:
- key_name: mykey
- instance_type: t2.small
- image: ami-f005ba11
- network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
-
-# Launch instances, runs some tasks
-# and then terminate them
-
-- name: Create a sandbox instance
- hosts: localhost
- gather_facts: False
- vars:
- keypair: my_keypair
- instance_type: m1.small
- security_group: my_securitygroup
- image: my_ami_id
- region: us-east-1
- tasks:
- - name: Launch instance
- ec2:
- key_name: "{{ keypair }}"
- group: "{{ security_group }}"
- instance_type: "{{ instance_type }}"
- image: "{{ image }}"
- wait: true
- region: "{{ region }}"
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
- register: ec2
-
- - name: Add new instance to host group
- add_host:
- hostname: "{{ item.public_ip }}"
- groupname: launched
- loop: "{{ ec2.instances }}"
-
- - name: Wait for SSH to come up
- delegate_to: "{{ item.public_dns_name }}"
- wait_for_connection:
- delay: 60
- timeout: 320
- loop: "{{ ec2.instances }}"
-
-- name: Configure instance(s)
- hosts: launched
- become: True
- gather_facts: True
- roles:
- - my_awesome_role
- - my_awesome_test
-
-- name: Terminate instances
- hosts: localhost
- tasks:
- - name: Terminate instances that were previously launched
- ec2:
- state: 'absent'
- instance_ids: '{{ ec2.instance_ids }}'
-
-# Start a few existing instances, run some tasks
-# and stop the instances
-
-- name: Start sandbox instances
- hosts: localhost
- gather_facts: false
- vars:
- instance_ids:
- - 'i-xxxxxx'
- - 'i-xxxxxx'
- - 'i-xxxxxx'
- region: us-east-1
- tasks:
- - name: Start the sandbox instances
- ec2:
- instance_ids: '{{ instance_ids }}'
- region: '{{ region }}'
- state: running
- wait: True
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
- roles:
- - do_neat_stuff
- - do_more_neat_stuff
-
-- name: Stop sandbox instances
- hosts: localhost
- gather_facts: false
- vars:
- instance_ids:
- - 'i-xxxxxx'
- - 'i-xxxxxx'
- - 'i-xxxxxx'
- region: us-east-1
- tasks:
- - name: Stop the sandbox instances
- ec2:
- instance_ids: '{{ instance_ids }}'
- region: '{{ region }}'
- state: stopped
- wait: True
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-#
-# Start stopped instances specified by tag
-#
-- local_action:
- module: ec2
- instance_tags:
- Name: ExtraPower
- state: running
-
-#
-# Restart instances specified by tag
-#
-- local_action:
- module: ec2
- instance_tags:
- Name: ExtraPower
- state: restarted
-
-#
-# Enforce that 5 instances with a tag "foo" are running
-# (Highly recommended!)
-#
-
-- ec2:
- key_name: mykey
- instance_type: c1.medium
- image: ami-40603AD1
- wait: yes
- group: webserver
- instance_tags:
- foo: bar
- exact_count: 5
- count_tag: foo
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-#
-# Enforce that 5 running instances named "database" with a "dbtype" of "postgres"
-#
-
-- ec2:
- key_name: mykey
- instance_type: c1.medium
- image: ami-40603AD1
- wait: yes
- group: webserver
- instance_tags:
- Name: database
- dbtype: postgres
- exact_count: 5
- count_tag:
- Name: database
- dbtype: postgres
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-#
-# count_tag complex argument examples
-#
-
- # instances with tag foo
-- ec2:
- count_tag:
- foo:
-
- # instances with tag foo=bar
-- ec2:
- count_tag:
- foo: bar
-
- # instances with tags foo=bar & baz
-- ec2:
- count_tag:
- foo: bar
- baz:
-
- # instances with tags foo & bar & baz=bang
-- ec2:
- count_tag:
- - foo
- - bar
- - baz: bang
-
-'''
-
-import time
-import datetime
-import traceback
-from ast import literal_eval
-from ansible.module_utils.compat.version import LooseVersion
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect
-from ansible.module_utils.six import get_function_code, string_types
-from ansible.module_utils._text import to_bytes, to_text
-
-try:
- import boto.ec2
- from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
- from boto.exception import EC2ResponseError
- from boto import connect_ec2_endpoint
- from boto import connect_vpc
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-
-def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None):
-
- # get reservations for instances that match tag(s) and are in the desired state
- state = module.params.get('state')
- if state not in ['running', 'stopped']:
- state = None
- reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone)
-
- instances = []
- for res in reservations:
- if hasattr(res, 'instances'):
- for inst in res.instances:
- if inst.state == 'terminated' or inst.state == 'shutting-down':
- continue
- instances.append(inst)
-
- return reservations, instances
-
-
-def _set_none_to_blank(dictionary):
- result = dictionary
- for k in result:
- if isinstance(result[k], dict):
- result[k] = _set_none_to_blank(result[k])
- elif not result[k]:
- result[k] = ""
- return result
-
-
def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
    """Translate tag/state/zone criteria into EC2 API filters and fetch reservations."""
    # TODO: filters do not work with tags that have underscores
    filters = dict()

    vpc_subnet_id = module.params.get('vpc_subnet_id')
    if vpc_subnet_id:
        filters["subnet-id"] = vpc_subnet_id
        if vpc:
            vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
            if vpc_id:
                filters["vpc-id"] = vpc_id

    if tags is not None:

        # Users sometimes pass a stringified dict/list; best-effort parse it.
        if isinstance(tags, str):
            try:
                tags = literal_eval(tags)
            except Exception:
                pass

        # A bare integer is normalized to text so it can act as a tag name.
        if isinstance(tags, int):
            tags = to_text(tags)

        # A plain string means "any instance carrying a tag with this name".
        if isinstance(tags, str):
            filters["tag-key"] = tags

        # A list mixes bare tag names and {name: value} dicts.
        if isinstance(tags, list):
            for entry in tags:
                if isinstance(entry, dict):
                    entry = _set_none_to_blank(entry)
                    filters.update(dict(("tag:" + name, value) for (name, value) in entry.items()))
                else:
                    filters["tag-key"] = entry

        # A dict maps tag names to required values.
        if isinstance(tags, dict):
            tags = _set_none_to_blank(tags)
            filters.update(dict(("tag:" + name, value) for (name, value) in tags.items()))

        # Refuse an empty filter set - it would match every instance.
        if not filters:
            module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags))

    if state:
        # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
        filters["instance-state-name"] = state

    if zone:
        filters["availability-zone"] = zone

    if module.params.get('id'):
        filters['client-token'] = module.params['id']

    return ec2.get_all_instances(filters=filters)
-
-
def get_instance_info(inst):
    """
    Build a plain dictionary of facts from a boto EC2 instance object.
    """
    instance_info = {
        'id': inst.id,
        'ami_launch_index': inst.ami_launch_index,
        'private_ip': inst.private_ip_address,
        'private_dns_name': inst.private_dns_name,
        'public_ip': inst.ip_address,
        'dns_name': inst.dns_name,
        'public_dns_name': inst.public_dns_name,
        'state_code': inst.state_code,
        'architecture': inst.architecture,
        'image_id': inst.image_id,
        'key_name': inst.key_name,
        'placement': inst.placement,
        # Placement is an availability zone such as "us-east-1a"; dropping
        # the trailing letter yields the region name.
        'region': inst.placement[:-1],
        'kernel': inst.kernel,
        'ramdisk': inst.ramdisk,
        'launch_time': inst.launch_time,
        'instance_type': inst.instance_type,
        'root_device_type': inst.root_device_type,
        'root_device_name': inst.root_device_name,
        'state': inst.state,
        'hypervisor': inst.hypervisor,
        'tags': inst.tags,
        'groups': dict((sg.id, sg.name) for sg in inst.groups),
    }

    # These attributes appeared in later boto releases; fall back when absent.
    instance_info['virtualization_type'] = getattr(inst, 'virtualization_type', None)
    instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized', False)
    instance_info['tenancy'] = getattr(inst, 'placement_tenancy', 'default')

    try:
        mappings = {}
        for device_name, device in getattr(inst, 'block_device_mapping').items():
            mappings[device_name] = {
                'status': device.status,
                'volume_id': device.volume_id,
                'delete_on_termination': device.delete_on_termination,
            }
        instance_info['block_device_mapping'] = mappings
    except AttributeError:
        # Older boto without block device info support.
        instance_info['block_device_mapping'] = False

    return instance_info
-
-
def boto_supports_associate_public_ip_address(ec2):
    """
    Whether the installed boto exposes associate_public_ip_address on the
    NetworkInterfaceSpecification class (added in Boto 2.13.0).

    ec2: authenticated ec2 connection object (unused; kept for a uniform signature)

    Returns:
        True if the attribute is available, else False
    """
    try:
        spec = boto.ec2.networkinterface.NetworkInterfaceSpecification()
        return hasattr(spec, "associate_public_ip_address")
    except AttributeError:
        # Very old boto without the networkinterface module at all.
        return False
-
-
def boto_supports_profile_name_arg(ec2):
    """
    Whether ec2.run_instances accepts the instance_profile_name argument
    (added in Boto 2.5.0).

    ec2: authenticated ec2 connection object

    Returns:
        True if the argument is accepted, else False
    """
    run_code = get_function_code(getattr(ec2, 'run_instances'))
    return 'instance_profile_name' in run_code.co_varnames
-
-
def boto_supports_volume_encryption():
    """
    Whether the installed boto supports encryption of EBS volumes
    (added in Boto 2.29.0).

    Returns:
        True when boto reports version 2.29.0 or newer, else False
    """
    if not hasattr(boto, 'Version'):
        return False
    return LooseVersion(boto.Version) >= LooseVersion('2.29.0')
-
-
def create_block_device(module, ec2, volume):
    """Validate a volume spec dict and translate it into a boto BlockDeviceType."""
    # Not aware of a way to determine this programatically
    # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
    MAX_IOPS_TO_SIZE_RATIO = 30

    volume_type = volume.get('volume_type')
    has_snapshot = 'snapshot' in volume
    has_ephemeral = 'ephemeral' in volume

    if not has_snapshot and not has_ephemeral and 'volume_size' not in volume:
        module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')

    if has_snapshot:
        if volume_type == 'io1' and 'iops' not in volume:
            module.fail_json(msg='io1 volumes must have an iops value set')
        if 'iops' in volume:
            # Validate the iops/size ratio against the snapshot's size when no
            # explicit size is given.
            snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
            size = volume.get('volume_size', snapshot.volume_size)
            if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
                module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)

    if has_ephemeral and has_snapshot:
        module.fail_json(msg='Cannot set both ephemeral and snapshot')

    kwargs = dict(snapshot_id=volume.get('snapshot'),
                  ephemeral_name=volume.get('ephemeral'),
                  size=volume.get('volume_size'),
                  volume_type=volume_type,
                  delete_on_termination=volume.get('delete_on_termination', False),
                  iops=volume.get('iops'))
    if boto_supports_volume_encryption():
        # The encrypted flag is only accepted by boto >= 2.29.0.
        kwargs['encrypted'] = volume.get('encrypted', None)
    return BlockDeviceType(**kwargs)
-
-
def boto_supports_param_in_spot_request(ec2, param):
    """
    Whether ec2.request_spot_instances accepts the named keyword argument.
    For example, placement_group was not accepted until Boto 2.3.0.

    ec2: authenticated ec2 connection object
    param: name of the keyword argument to look for

    Returns:
        True if the parameter is accepted, else False
    """
    spot_method = getattr(ec2, 'request_spot_instances')
    return param in get_function_code(spot_method).co_varnames
-
-
def await_spot_requests(module, ec2, spot_requests, count):
    """
    Wait for a group of spot requests to be fulfilled, or fail.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances
    count: Total number of instances to be created by the spot requests

    Returns:
        list of instance ID's created by the spot request(s)
    """
    spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
    wait_complete = time.time() + spot_wait_timeout

    # Maps fulfilled request id -> instance id; doubles as a "seen" set so a
    # request is only examined until it yields an instance.
    spot_req_inst_ids = dict()
    while time.time() < wait_complete:
        # Re-poll every outstanding spot request each pass; AWS mutates
        # their state asynchronously.
        reqs = ec2.get_all_spot_instance_requests()
        for sirb in spot_requests:
            if sirb.id in spot_req_inst_ids:
                continue
            for sir in reqs:
                if sir.id != sirb.id:
                    continue  # this is not our spot instance
                if sir.instance_id is not None:
                    spot_req_inst_ids[sirb.id] = sir.instance_id
                elif sir.state == 'open':
                    continue  # still waiting, nothing to do here
                elif sir.state == 'active':
                    continue  # Instance is created already, nothing to do here
                elif sir.state == 'failed':
                    module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
                        sir.id, sir.status.code, sir.fault.code, sir.fault.message))
                elif sir.state == 'cancelled':
                    module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
                elif sir.state == 'closed':
                    # instance is terminating or marked for termination
                    # this may be intentional on the part of the operator,
                    # or it may have been terminated by AWS due to capacity,
                    # price, or group constraints in this case, we'll fail
                    # the module if the reason for the state is anything
                    # other than termination by user. Codes are documented at
                    # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
                    if sir.status.code == 'instance-terminated-by-user':
                        # do nothing, since the user likely did this on purpose
                        pass
                    else:
                        spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
                        module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))

        # Back off between polls until every request produced an instance.
        if len(spot_req_inst_ids) < count:
            time.sleep(5)
        else:
            return list(spot_req_inst_ids.values())
    # Overall deadline exceeded before all requests were fulfilled.
    module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
-
-
def enforce_count(module, ec2, vpc):
    """Converge the number of tag-matched instances to exactly exact_count.

    Creates or terminates instances as needed and returns
    (all_instances, instance_dict_array, changed_instance_ids, changed).
    """
    exact_count = module.params.get('exact_count')
    count_tag = module.params.get('count_tag')
    zone = module.params.get('zone')

    # Refuse to enforce a count without a tag filter; otherwise unrelated
    # instances could be terminated.
    if exact_count and count_tag is None:
        module.fail_json(msg="you must use the 'count_tag' option with exact_count")

    reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)

    changed = None
    checkmode = False
    instance_dict_array = []
    changed_instance_ids = None

    current = len(instances)
    if current == exact_count:
        changed = False
    elif current < exact_count:
        # Deficit: launch the difference.
        changed = True
        if not checkmode:
            (instance_dict_array, changed_instance_ids, changed) \
                = create_instances(module, ec2, vpc, override_count=exact_count - current)
            instances.extend(instance_dict_array)
    else:
        # Surplus: terminate the instances with the lexically smallest ids.
        changed = True
        if not checkmode:
            remove_ids = sorted(x.id for x in instances)[:current - exact_count]
            instances = [x for x in instances if x.id not in remove_ids]

            (changed, instance_dict_array, changed_instance_ids) \
                = terminate_instances(module, ec2, remove_ids)
            for inst in instance_dict_array:
                inst['state'] = "terminated"

    # Normalize: every returned instance is a plain info dict.
    all_instances = []
    for inst in instances:
        if not isinstance(inst, dict):
            warn_if_public_ip_assignment_changed(module, inst)
            inst = get_instance_info(inst)
        all_instances.append(inst)

    return (all_instances, instance_dict_array, changed_instance_ids, changed)
-
-
def create_instances(module, ec2, vpc, override_count=None):
    """
    Creates new instances

    module : AnsibleModule object
    ec2: authenticated ec2 connection object
    vpc: authenticated VPC connection object (may be None outside a region)
    override_count: when set, launch this many instead of the 'count' param

    Returns:
        A list of dictionaries with instance information
        about the instances that were launched
    """

    key_name = module.params.get('key_name')
    # NOTE(review): 'id' shadows the builtin; preserved for compatibility.
    id = module.params.get('id')
    group_name = module.params.get('group')
    group_id = module.params.get('group_id')
    zone = module.params.get('zone')
    instance_type = module.params.get('instance_type')
    tenancy = module.params.get('tenancy')
    spot_price = module.params.get('spot_price')
    spot_type = module.params.get('spot_type')
    image = module.params.get('image')
    if override_count:
        count = override_count
    else:
        count = module.params.get('count')
    monitoring = module.params.get('monitoring')
    kernel = module.params.get('kernel')
    ramdisk = module.params.get('ramdisk')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
    placement_group = module.params.get('placement_group')
    user_data = module.params.get('user_data')
    instance_tags = module.params.get('instance_tags')
    vpc_subnet_id = module.params.get('vpc_subnet_id')
    assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
    private_ip = module.params.get('private_ip')
    instance_profile_name = module.params.get('instance_profile_name')
    volumes = module.params.get('volumes')
    ebs_optimized = module.params.get('ebs_optimized')
    exact_count = module.params.get('exact_count')
    count_tag = module.params.get('count_tag')
    source_dest_check = module.boolean(module.params.get('source_dest_check'))
    termination_protection = module.boolean(module.params.get('termination_protection'))
    network_interfaces = module.params.get('network_interfaces')
    spot_launch_group = module.params.get('spot_launch_group')
    instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')

    # Resolve the VPC id from the subnet; a VPC connection implies a region.
    vpc_id = None
    if vpc_subnet_id:
        if not vpc:
            module.fail_json(msg="region must be specified")
        else:
            vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
    else:
        vpc_id = None

    try:
        # Here we try to lookup the group id from the security group name - if group is set.
        if group_name:
            if vpc_id:
                grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
            else:
                grp_details = ec2.get_all_security_groups()
            if isinstance(group_name, string_types):
                group_name = [group_name]
            unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
            if len(unmatched) > 0:
                module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
            group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
        # Now we try to lookup the group id testing if group exists.
        elif group_id:
            # wrap the group_id in a list if it's not one already
            if isinstance(group_id, string_types):
                group_id = [group_id]
            grp_details = ec2.get_all_security_groups(group_ids=group_id)
            group_name = [grp_item.name for grp_item in grp_details]
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    # Lookup any instances that match our run id.

    running_instances = []
    count_remaining = int(count)

    # With a client-token 'id', previously launched (still running) instances
    # count toward the requested total, making the launch idempotent.
    if id is not None:
        filter_dict = {'client-token': id, 'instance-state-name': 'running'}
        previous_reservations = ec2.get_all_instances(None, filter_dict)
        for res in previous_reservations:
            for prev_instance in res.instances:
                running_instances.append(prev_instance)
        count_remaining = count_remaining - len(running_instances)

    # Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.

    if count_remaining == 0:
        changed = False
    else:
        changed = True
        try:
            params = {'image_id': image,
                      'key_name': key_name,
                      'monitoring_enabled': monitoring,
                      'placement': zone,
                      'instance_type': instance_type,
                      'kernel_id': kernel,
                      'ramdisk_id': ramdisk}
            if user_data is not None:
                params['user_data'] = to_bytes(user_data, errors='surrogate_or_strict')

            if ebs_optimized:
                params['ebs_optimized'] = ebs_optimized

            # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
            if not spot_price:
                params['tenancy'] = tenancy

            if boto_supports_profile_name_arg(ec2):
                params['instance_profile_name'] = instance_profile_name
            else:
                if instance_profile_name is not None:
                    module.fail_json(
                        msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")

            # assign_public_ip requires an explicit NetworkInterfaceSpecification;
            # otherwise fall back to plain subnet/security-group parameters.
            if assign_public_ip is not None:
                if not boto_supports_associate_public_ip_address(ec2):
                    module.fail_json(
                        msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
                elif not vpc_subnet_id:
                    module.fail_json(
                        msg="assign_public_ip only available with vpc_subnet_id")

                else:
                    if private_ip:
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            subnet_id=vpc_subnet_id,
                            private_ip_address=private_ip,
                            groups=group_id,
                            associate_public_ip_address=assign_public_ip)
                    else:
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            subnet_id=vpc_subnet_id,
                            groups=group_id,
                            associate_public_ip_address=assign_public_ip)
                    interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
                    params['network_interfaces'] = interfaces
            else:
                if network_interfaces:
                    if isinstance(network_interfaces, string_types):
                        network_interfaces = [network_interfaces]
                    interfaces = []
                    for i, network_interface_id in enumerate(network_interfaces):
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            network_interface_id=network_interface_id,
                            device_index=i)
                        interfaces.append(interface)
                    params['network_interfaces'] = \
                        boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
                else:
                    params['subnet_id'] = vpc_subnet_id
                    if vpc_subnet_id:
                        params['security_group_ids'] = group_id
                    else:
                        params['security_groups'] = group_name

            if volumes:
                bdm = BlockDeviceMapping()
                for volume in volumes:
                    if 'device_name' not in volume:
                        module.fail_json(msg='Device name must be set for volume')
                    # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0
                    # to be a signal not to create this volume
                    if 'volume_size' not in volume or int(volume['volume_size']) > 0:
                        bdm[volume['device_name']] = create_block_device(module, ec2, volume)

                params['block_device_map'] = bdm

            # check to see if we're using spot pricing first before starting instances
            if not spot_price:
                # When assign_public_ip+private_ip are used, the private IP
                # already lives in the network interface spec built above.
                if assign_public_ip is not None and private_ip:
                    params.update(
                        dict(
                            min_count=count_remaining,
                            max_count=count_remaining,
                            client_token=id,
                            placement_group=placement_group,
                        )
                    )
                else:
                    params.update(
                        dict(
                            min_count=count_remaining,
                            max_count=count_remaining,
                            client_token=id,
                            placement_group=placement_group,
                            private_ip_address=private_ip,
                        )
                    )

                # For ordinary (not spot) instances, we can select 'stop'
                # (the default) or 'terminate' here.
                params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'

                try:
                    res = ec2.run_instances(**params)
                except boto.exception.EC2ResponseError as e:
                    # Some instance types reject 'stop'; retry once with 'terminate'.
                    if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
                            "InvalidParameterCombination" == e.error_code):
                        params['instance_initiated_shutdown_behavior'] = 'terminate'
                        res = ec2.run_instances(**params)
                    else:
                        raise

                instids = [i.id for i in res.instances]
                while True:
                    try:
                        ec2.get_all_instances(instids)
                        break
                    except boto.exception.EC2ResponseError as e:
                        if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
                            # there's a race between start and get an instance
                            continue
                        else:
                            module.fail_json(msg=str(e))

                # The instances returned through ec2.run_instances above can be in
                # terminated state due to idempotency. See commit 7f11c3d for a complete
                # explanation.
                terminated_instances = [
                    str(instance.id) for instance in res.instances if instance.state == 'terminated'
                ]
                if terminated_instances:
                    module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
                                     "were created previously but have since been terminated - " +
                                     "use a (possibly different) 'instanceid' parameter")

            else:
                if private_ip:
                    module.fail_json(
                        msg='private_ip only available with on-demand (non-spot) instances')
                if boto_supports_param_in_spot_request(ec2, 'placement_group'):
                    params['placement_group'] = placement_group
                elif placement_group:
                    module.fail_json(
                        msg="placement_group parameter requires Boto version 2.3.0 or higher.")

                # You can't tell spot instances to 'stop'; they will always be
                # 'terminate'd. For convenience, we'll ignore the latter value.
                if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
                    module.fail_json(
                        msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")

                if spot_launch_group and isinstance(spot_launch_group, string_types):
                    params['launch_group'] = spot_launch_group

                params.update(dict(
                    count=count_remaining,
                    type=spot_type,
                ))

                # Set spot ValidUntil
                # ValidUntil -> (timestamp). The end date of the request, in
                # UTC format (for example, YYYY -MM -DD T*HH* :MM :SS Z).
                utc_valid_until = (
                    datetime.datetime.utcnow()
                    + datetime.timedelta(seconds=spot_wait_timeout))
                params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')

                res = ec2.request_spot_instances(spot_price, **params)

                # Now we have to do the intermediate waiting
                if wait:
                    instids = await_spot_requests(module, ec2, res, count)
                else:
                    instids = []
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))

        # wait here until the instances are up
        num_running = 0
        wait_timeout = time.time() + wait_timeout
        res_list = ()
        while wait_timeout > time.time() and num_running < len(instids):
            try:
                res_list = ec2.get_all_instances(instids)
            except boto.exception.BotoServerError as e:
                if e.error_code == 'InvalidInstanceID.NotFound':
                    time.sleep(1)
                    continue
                else:
                    raise

            num_running = 0
            for res in res_list:
                num_running += len([i for i in res.instances if i.state == 'running'])
            if len(res_list) <= 0:
                # got a bad response of some sort, possibly due to
                # stale/cached data. Wait a second and then try again
                time.sleep(1)
                continue
            if wait and num_running < len(instids):
                time.sleep(5)
            else:
                break

        if wait and wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())

        # We do this after the loop ends so that we end up with one list
        for res in res_list:
            running_instances.extend(res.instances)

        # Enabled by default by AWS
        # NOTE(review): 'res' below is the last reservation from the loop
        # above, so only that reservation's instances are modified - looks
        # suspicious but preserved as-is.
        if source_dest_check is False:
            for inst in res.instances:
                inst.modify_attribute('sourceDestCheck', False)

        # Disabled by default by AWS
        if termination_protection is True:
            for inst in res.instances:
                inst.modify_attribute('disableApiTermination', True)

        # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
        if instance_tags and instids:
            try:
                ec2.create_tags(instids, instance_tags)
            except boto.exception.EC2ResponseError as e:
                module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))

    # Refresh each instance and convert to plain fact dictionaries.
    instance_dict_array = []
    created_instance_ids = []
    for inst in running_instances:
        inst.update()
        d = get_instance_info(inst)
        created_instance_ids.append(inst.id)
        instance_dict_array.append(d)

    return (instance_dict_array, created_instance_ids, changed)
-
-
def terminate_instances(module, ec2, instance_ids):
    """
    Terminate every running or stopped instance in instance_ids.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    instance_ids: list of instance ids to terminate

    Returns (changed, instance_dict_array, terminated_instance_ids).
    "changed" stays False when nothing needed terminating.
    """

    # Whether to wait for termination to complete before returning
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        module.fail_json(msg='instance_ids should be a list of instances, aborting')

    terminated_instance_ids = []
    for reservation in ec2.get_all_instances(instance_ids):
        for inst in reservation.instances:
            if inst.state not in ('running', 'stopped'):
                continue  # already terminating/terminated; nothing to do
            terminated_instance_ids.append(inst.id)
            instance_dict_array.append(get_instance_info(inst))
            try:
                ec2.terminate_instances([inst.id])
            except EC2ResponseError as e:
                module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
            changed = True

    # wait here until the instances are 'terminated'
    if wait:
        target = len(terminated_instance_ids)
        num_terminated = 0
        deadline = time.time() + wait_timeout
        while deadline > time.time() and num_terminated < target:
            response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
                                             filters={'instance-state-name': 'terminated'})
            try:
                num_terminated = sum(len(r.instances) for r in response)
            except Exception:
                # got a bad response of some sort, possibly due to
                # stale/cached data. Wait a second and then try again
                time.sleep(1)
                continue

            if num_terminated < target:
                time.sleep(5)

        # waiting took too long
        if deadline < time.time() and num_terminated < target:
            module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
        # Lets get the current state of the instances after terminating - issue600
        instance_dict_array = [
            get_instance_info(inst)
            for r in ec2.get_all_instances(instance_ids=terminated_instance_ids,
                                           filters={'instance-state-name': 'terminated'})
            for inst in r.instances
        ]

    return (changed, instance_dict_array, terminated_instance_ids)
-
-
def startstop_instances(module, ec2, instance_ids, state, instance_tags):
    """
    Starts or stops a list of existing instances

    module: Ansible module object
    ec2: authenticated ec2 connection object
    instance_ids: The list of instances to start in the form of
        [ {id: <inst-id>}, ..]
    instance_tags: A dict of tag keys and values in the form of
        {key: value, ... }
    state: Intended state ("running" or "stopped")

    Returns a dictionary of instance information
    about the instances started/stopped.

    If the instance was not able to change state,
    "changed" will be set to False.

    Note that if instance_ids and instance_tags are both non-empty,
    this method will process the intersection of the two
    """

    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    group_id = module.params.get('group_id')
    group_name = module.params.get('group')
    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        # Fail unless the user defined instance tags
        if not instance_tags:
            module.fail_json(msg='instance_ids should be a list of instances, aborting')

    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
    # An empty filter does no filtering, so it's safe to pass it to the
    # get_all_instances method even if the user did not specify instance_tags
    filters = {}
    if instance_tags:
        for key, value in instance_tags.items():
            filters["tag:" + key] = value

    if module.params.get('id'):
        filters['client-token'] = module.params['id']
    # Check that our instances are not in the state we want to take

    # Check (and eventually change) instances attributes and instances state
    existing_instances_array = []
    for res in ec2.get_all_instances(instance_ids, filters=filters):
        for inst in res.instances:

            warn_if_public_ip_assignment_changed(module, inst)

            # Short-circuits: termination protection is only checked when the
            # source/dest check did not already report a change.
            changed = (check_source_dest_attr(module, inst, ec2) or
                       check_termination_protection(module, inst) or changed)

            # Check security groups and if we're using ec2-vpc; ec2-classic security groups may not be modified
            if inst.vpc_id and group_name:
                grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
                if isinstance(group_name, string_types):
                    group_name = [group_name]
                unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
                if unmatched:
                    module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
                group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
            elif inst.vpc_id and group_id:
                if isinstance(group_id, string_types):
                    group_id = [group_id]
                grp_details = ec2.get_all_security_groups(group_ids=group_id)
                group_ids = [grp_item.id for grp_item in grp_details]
            if inst.vpc_id and (group_name or group_id):
                # Only rewrite the group set when it actually differs.
                if set(sg.id for sg in inst.groups) != set(group_ids):
                    changed = inst.modify_attribute('groupSet', group_ids)

            # Check instance state
            if inst.state != state:
                instance_dict_array.append(get_instance_info(inst))
                try:
                    if state == 'running':
                        inst.start()
                    else:
                        inst.stop()
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
                changed = True
            existing_instances_array.append(inst.id)

    # Merge the ids we actually saw with those the caller supplied.
    instance_ids = list(set(existing_instances_array + (instance_ids or [])))
    # Wait for all the instances to finish starting or stopping
    wait_timeout = time.time() + wait_timeout
    while wait and wait_timeout > time.time():
        instance_dict_array = []
        matched_instances = []
        for res in ec2.get_all_instances(instance_ids):
            for i in res.instances:
                if i.state == state:
                    instance_dict_array.append(get_instance_info(i))
                    matched_instances.append(i)
        if len(matched_instances) < len(instance_ids):
            time.sleep(5)
        else:
            break

    if wait and wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())

    return (changed, instance_dict_array, instance_ids)
-
-
def restart_instances(module, ec2, instance_ids, state, instance_tags):
    """
    Reboot a set of existing instances, selected by id and/or tags.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    instance_ids: The list of instances to start in the form of
        [ {id: <inst-id>}, ..]
    instance_tags: A dict of tag keys and values in the form of
        {key: value, ... }
    state: Intended state ("restarted")

    Returns (changed, instance_dict_array, instance_ids). "changed" stays
    False when no instance needed a state change. Wait does not apply here
    as reboot is an OS level operation. When both instance_ids and
    instance_tags are non-empty, the intersection of the two is processed.
    """

    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        # Allow selection purely by tags; otherwise there is nothing to act on.
        if not instance_tags:
            module.fail_json(msg='instance_ids should be a list of instances, aborting')

    # An empty filter dict does no filtering, so it is always safe to pass.
    filters = {"tag:" + key: value for key, value in instance_tags.items()} if instance_tags else {}
    if module.params.get('id'):
        filters['client-token'] = module.params['id']

    for reservation in ec2.get_all_instances(instance_ids, filters=filters):
        for inst in reservation.instances:

            warn_if_public_ip_assignment_changed(module, inst)

            # Short-circuit preserved: termination protection is only checked
            # when the source/dest check did not already report a change.
            if check_source_dest_attr(module, inst, ec2):
                changed = True
            elif check_termination_protection(module, inst):
                changed = True

            if inst.state != state:
                instance_dict_array.append(get_instance_info(inst))
                try:
                    inst.reboot()
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
                changed = True

    return (changed, instance_dict_array, instance_ids)
-
-
-def check_termination_protection(module, inst):
- """
- Check the instance disableApiTermination attribute.
-
- module: Ansible module object
- inst: EC2 instance object
-
- returns: True if state changed None otherwise
- """
-
- termination_protection = module.params.get('termination_protection')
-
- if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
- inst.modify_attribute('disableApiTermination', termination_protection)
- return True
-
-
-def check_source_dest_attr(module, inst, ec2):
- """
- Check the instance sourceDestCheck attribute.
-
- module: Ansible module object
- inst: EC2 instance object
-
- returns: True if state changed None otherwise
- """
-
- source_dest_check = module.params.get('source_dest_check')
-
- if source_dest_check is not None:
- try:
- if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
- inst.modify_attribute('sourceDestCheck', source_dest_check)
- return True
- except boto.exception.EC2ResponseError as exc:
- # instances with more than one Elastic Network Interface will
- # fail, because they have the sourceDestCheck attribute defined
- # per-interface
- if exc.code == 'InvalidInstanceID':
- for interface in inst.interfaces:
- if interface.source_dest_check != source_dest_check:
- ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
- return True
- else:
- module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
- exception=traceback.format_exc())
-
-
-def warn_if_public_ip_assignment_changed(module, instance):
- # This is a non-modifiable attribute.
- assign_public_ip = module.params.get('assign_public_ip')
-
- # Check that public ip assignment is the same and warn if not
- public_dns_name = getattr(instance, 'public_dns_name', None)
- if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False):
- module.warn("Unable to modify public ip assignment to {0} for instance {1}. "
- "Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id))
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- key_name=dict(aliases=['keypair']),
- id=dict(),
- group=dict(type='list', aliases=['groups']),
- group_id=dict(type='list'),
- zone=dict(aliases=['aws_zone', 'ec2_zone']),
- instance_type=dict(aliases=['type']),
- spot_price=dict(),
- spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
- spot_launch_group=dict(),
- image=dict(),
- kernel=dict(),
- count=dict(type='int', default='1'),
- monitoring=dict(type='bool', default=False),
- ramdisk=dict(),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- spot_wait_timeout=dict(type='int', default=600),
- placement_group=dict(),
- user_data=dict(),
- instance_tags=dict(type='dict'),
- vpc_subnet_id=dict(),
- assign_public_ip=dict(type='bool'),
- private_ip=dict(),
- instance_profile_name=dict(),
- instance_ids=dict(type='list', aliases=['instance_id']),
- source_dest_check=dict(type='bool', default=None),
- termination_protection=dict(type='bool', default=None),
- state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
- instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']),
- exact_count=dict(type='int', default=None),
- count_tag=dict(type='raw'),
- volumes=dict(type='list'),
- ebs_optimized=dict(type='bool', default=False),
- tenancy=dict(default='default', choices=['default', 'dedicated']),
- network_interfaces=dict(type='list', aliases=['network_interface'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=[
- # Can be uncommented when we finish the deprecation cycle.
- # ['group', 'group_id'],
- ['exact_count', 'count'],
- ['exact_count', 'state'],
- ['exact_count', 'instance_ids'],
- ['network_interfaces', 'assign_public_ip'],
- ['network_interfaces', 'group'],
- ['network_interfaces', 'group_id'],
- ['network_interfaces', 'private_ip'],
- ['network_interfaces', 'vpc_subnet_id'],
- ],
- )
-
- if module.params.get('group') and module.params.get('group_id'):
- module.deprecate(
- msg='Support for passing both group and group_id has been deprecated. '
- 'Currently group_id is ignored, in future passing both will result in an error',
- version='2.14', collection_name='ansible.builtin')
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
- if module.params.get('region') or not module.params.get('ec2_url'):
- ec2 = ec2_connect(module)
- elif module.params.get('ec2_url'):
- ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
-
- if 'region' not in aws_connect_kwargs:
- aws_connect_kwargs['region'] = ec2.region
-
- vpc = connect_vpc(**aws_connect_kwargs)
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
-
- tagged_instances = []
-
- state = module.params['state']
-
- if state == 'absent':
- instance_ids = module.params['instance_ids']
- if not instance_ids:
- module.fail_json(msg='instance_ids list is required for absent state')
-
- (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
-
- elif state in ('running', 'stopped'):
- instance_ids = module.params.get('instance_ids')
- instance_tags = module.params.get('instance_tags')
- if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
- module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
-
- (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
-
- elif state in ('restarted'):
- instance_ids = module.params.get('instance_ids')
- instance_tags = module.params.get('instance_tags')
- if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
- module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
-
- (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
-
- elif state == 'present':
- # Changed is always set to true when provisioning new instances
- if not module.params.get('image'):
- module.fail_json(msg='image parameter is required for new instance')
-
- if module.params.get('exact_count') is None:
- (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
- else:
- (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
-
- # Always return instances in the same order
- if new_instance_ids:
- new_instance_ids.sort()
- if instance_dict_array:
- instance_dict_array.sort(key=lambda x: x['id'])
- if tagged_instances:
- tagged_instances.sort(key=lambda x: x['id'])
-
- module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/ec2_ami_info.py b/test/support/integration/plugins/modules/ec2_ami_info.py
deleted file mode 100644
index 26f8694634..0000000000
--- a/test/support/integration/plugins/modules/ec2_ami_info.py
+++ /dev/null
@@ -1,279 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_ami_info
-version_added: '2.5'
-short_description: Gather information about ec2 AMIs
-description:
- - Gather information about ec2 AMIs
- - This module was called C(ec2_ami_facts) before Ansible 2.9. The usage did not change.
-author:
- - Prasad Katti (@prasadkatti)
-requirements: [ boto3 ]
-options:
- image_ids:
- description: One or more image IDs.
- aliases: [image_id]
- type: list
- elements: str
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
- - Filter names and values are case sensitive.
- type: dict
- owners:
- description:
- - Filter the images by the owner. Valid options are an AWS account ID, self,
- or an AWS owner alias ( amazon | aws-marketplace | microsoft ).
- aliases: [owner]
- type: list
- elements: str
- executable_users:
- description:
- - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
- aliases: [executable_user]
- type: list
- elements: str
- describe_image_attributes:
- description:
- - Describe attributes (like launchPermission) of the images found.
- default: no
- type: bool
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: gather information about an AMI using ami-id
- ec2_ami_info:
- image_ids: ami-5b488823
-
-- name: gather information about all AMIs with tag key Name and value webapp
- ec2_ami_info:
- filters:
- "tag:Name": webapp
-
-- name: gather information about an AMI with 'AMI Name' equal to foobar
- ec2_ami_info:
- filters:
- name: foobar
-
-- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
- ec2_ami_info:
- owners: 099720109477
- filters:
- name: "ubuntu/images/ubuntu-zesty-17.04-*"
-'''
-
-RETURN = '''
-images:
- description: A list of images.
- returned: always
- type: list
- elements: dict
- contains:
- architecture:
- description: The architecture of the image.
- returned: always
- type: str
- sample: x86_64
- block_device_mappings:
- description: Any block device mapping entries.
- returned: always
- type: list
- elements: dict
- contains:
- device_name:
- description: The device name exposed to the instance.
- returned: always
- type: str
- sample: /dev/sda1
- ebs:
- description: EBS volumes
- returned: always
- type: complex
- creation_date:
- description: The date and time the image was created.
- returned: always
- type: str
- sample: '2017-10-16T19:22:13.000Z'
- description:
- description: The description of the AMI.
- returned: always
- type: str
- sample: ''
- ena_support:
- description: Whether enhanced networking with ENA is enabled.
- returned: always
- type: bool
- sample: true
- hypervisor:
- description: The hypervisor type of the image.
- returned: always
- type: str
- sample: xen
- image_id:
- description: The ID of the AMI.
- returned: always
- type: str
- sample: ami-5b466623
- image_location:
- description: The location of the AMI.
- returned: always
- type: str
- sample: 408466080000/Webapp
- image_type:
- description: The type of image.
- returned: always
- type: str
- sample: machine
- launch_permissions:
- description: A List of AWS accounts may launch the AMI.
- returned: When image is owned by calling account and I(describe_image_attributes) is yes.
- type: list
- elements: dict
- contains:
- group:
- description: A value of 'all' means the AMI is public.
- type: str
- user_id:
- description: An AWS account ID with permissions to launch the AMI.
- type: str
- sample: [{"group": "all"}, {"user_id": "408466080000"}]
- name:
- description: The name of the AMI that was provided during image creation.
- returned: always
- type: str
- sample: Webapp
- owner_id:
- description: The AWS account ID of the image owner.
- returned: always
- type: str
- sample: '408466080000'
- public:
- description: Whether the image has public launch permissions.
- returned: always
- type: bool
- sample: true
- root_device_name:
- description: The device name of the root device.
- returned: always
- type: str
- sample: /dev/sda1
- root_device_type:
- description: The type of root device used by the AMI.
- returned: always
- type: str
- sample: ebs
- sriov_net_support:
- description: Whether enhanced networking is enabled.
- returned: always
- type: str
- sample: simple
- state:
- description: The current state of the AMI.
- returned: always
- type: str
- sample: available
- tags:
- description: Any tags assigned to the image.
- returned: always
- type: dict
- virtualization_type:
- description: The type of virtualization of the AMI.
- returned: always
- type: str
- sample: hvm
-'''
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
-
-
-def list_ec2_images(ec2_client, module):
-
- image_ids = module.params.get("image_ids")
- owners = module.params.get("owners")
- executable_users = module.params.get("executable_users")
- filters = module.params.get("filters")
- owner_param = []
-
- # describe_images is *very* slow if you pass the `Owners`
- # param (unless it's self), for some reason.
- # Converting the owners to filters and removing from the
- # owners param greatly speeds things up.
- # Implementation based on aioue's suggestion in #24886
- for owner in owners:
- if owner.isdigit():
- if 'owner-id' not in filters:
- filters['owner-id'] = list()
- filters['owner-id'].append(owner)
- elif owner == 'self':
- # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
- owner_param.append(owner)
- else:
- if 'owner-alias' not in filters:
- filters['owner-alias'] = list()
- filters['owner-alias'].append(owner)
-
- filters = ansible_dict_to_boto3_filter_list(filters)
-
- try:
- images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users)
- images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
- except (ClientError, BotoCoreError) as err:
- module.fail_json_aws(err, msg="error describing images")
- for image in images:
- try:
- image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
- if module.params.get("describe_image_attributes"):
- launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions']
- image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
- except (ClientError, BotoCoreError) as err:
- # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
- pass
-
- images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist
- module.exit_json(images=images)
-
-
-def main():
-
- argument_spec = dict(
- image_ids=dict(default=[], type='list', aliases=['image_id']),
- filters=dict(default={}, type='dict'),
- owners=dict(default=[], type='list', aliases=['owner']),
- executable_users=dict(default=[], type='list', aliases=['executable_user']),
- describe_image_attributes=dict(default=False, type='bool')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- ec2_client = module.client('ec2')
-
- list_ec2_images(ec2_client, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/ec2_group.py b/test/support/integration/plugins/modules/ec2_group.py
deleted file mode 100644
index bc416f66b5..0000000000
--- a/test/support/integration/plugins/modules/ec2_group.py
+++ /dev/null
@@ -1,1345 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-DOCUMENTATION = '''
----
-module: ec2_group
-author: "Andrew de Quincey (@adq)"
-version_added: "1.3"
-requirements: [ boto3 ]
-short_description: maintain an ec2 VPC security group.
-description:
- - Maintains ec2 security groups. This module has a dependency on python-boto >= 2.5.
-options:
- name:
- description:
- - Name of the security group.
- - One of and only one of I(name) or I(group_id) is required.
- - Required if I(state=present).
- required: false
- type: str
- group_id:
- description:
- - Id of group to delete (works only with absent).
- - One of and only one of I(name) or I(group_id) is required.
- required: false
- version_added: "2.4"
- type: str
- description:
- description:
- - Description of the security group. Required when C(state) is C(present).
- required: false
- type: str
- vpc_id:
- description:
- - ID of the VPC to create the group in.
- required: false
- type: str
- rules:
- description:
- - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
- no inbound rules will be enabled. Rules list may include its own name in `group_name`.
- This allows idempotent loopback additions (e.g. allow group to access itself).
- Rule sources list support was added in version 2.4. This allows to define multiple sources per
- source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed.
- In version 2.5 support for rule descriptions was added.
- required: false
- type: list
- elements: dict
- suboptions:
- cidr_ip:
- type: str
- description:
- - The IPv4 CIDR range traffic is coming from.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- cidr_ipv6:
- type: str
- description:
- - The IPv6 CIDR range traffic is coming from.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- ip_prefix:
- type: str
- description:
- - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
- that traffic is coming from.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_id:
- type: str
- description:
- - The ID of the Security Group that traffic is coming from.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_name:
- type: str
- description:
- - Name of the Security Group that traffic is coming from.
- - If the Security Group doesn't exist a new Security Group will be
- created with I(group_desc) as the description.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_desc:
- type: str
- description:
- - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
- created with I(group_desc) as the description.
- proto:
- type: str
- description:
- - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
- from_port:
- type: int
- description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
- to_port:
- type: int
- description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
- rule_desc:
- type: str
- description: A description for the rule.
- rules_egress:
- description:
- - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
- a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
- Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions
- was added.
- required: false
- version_added: "1.6"
- type: list
- elements: dict
- suboptions:
- cidr_ip:
- type: str
- description:
- - The IPv4 CIDR range traffic is going to.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- cidr_ipv6:
- type: str
- description:
- - The IPv6 CIDR range traffic is going to.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- ip_prefix:
- type: str
- description:
- - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
- that traffic is going to.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_id:
- type: str
- description:
- - The ID of the Security Group that traffic is going to.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_name:
- type: str
- description:
- - Name of the Security Group that traffic is going to.
- - If the Security Group doesn't exist a new Security Group will be
- created with I(group_desc) as the description.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_desc:
- type: str
- description:
- - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
- created with I(group_desc) as the description.
- proto:
- type: str
- description:
- - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
- from_port:
- type: int
- description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
- to_port:
- type: int
- description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
- rule_desc:
- type: str
- description: A description for the rule.
- state:
- version_added: "1.4"
- description:
- - Create or delete a security group.
- required: false
- default: 'present'
- choices: [ "present", "absent" ]
- aliases: []
- type: str
- purge_rules:
- version_added: "1.8"
- description:
- - Purge existing rules on security group that are not found in rules.
- required: false
- default: 'true'
- aliases: []
- type: bool
- purge_rules_egress:
- version_added: "1.8"
- description:
- - Purge existing rules_egress on security group that are not found in rules_egress.
- required: false
- default: 'true'
- aliases: []
- type: bool
- tags:
- version_added: "2.4"
- description:
- - A dictionary of one or more tags to assign to the security group.
- required: false
- type: dict
- aliases: ['resource_tags']
- purge_tags:
- version_added: "2.4"
- description:
- - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
- tags will not be modified.
- required: false
- default: yes
- type: bool
-
-extends_documentation_fragment:
- - aws
- - ec2
-
-notes:
- - If a rule declares a group_name and that group doesn't exist, it will be
- automatically created. In that case, group_desc should be provided as well.
- The module will refuse to create a depended-on group without a description.
- - Preview diff mode support is added in version 2.7.
-'''
-
-EXAMPLES = '''
-- name: example using security group rule descriptions
- ec2_group:
- name: "{{ name }}"
- description: sg with rule descriptions
- vpc_id: vpc-xxxxxxxx
- profile: "{{ aws_profile }}"
- region: us-east-1
- rules:
- - proto: tcp
- ports:
- - 80
- cidr_ip: 0.0.0.0/0
- rule_desc: allow all on port 80
-
-- name: example ec2 group
- ec2_group:
- name: example
- description: an example EC2 group
- vpc_id: 12345
- region: eu-west-1
- aws_secret_key: SECRET
- aws_access_key: ACCESS
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 10.0.0.0/8
- - proto: tcp
- from_port: 443
- to_port: 443
- # this should only be needed for EC2 Classic security group rules
- # because in a VPC an ELB will use a user-account security group
- group_id: amazon-elb/sg-87654321/amazon-elb-sg
- - proto: tcp
- from_port: 3306
- to_port: 3306
- group_id: 123412341234/sg-87654321/exact-name-of-sg
- - proto: udp
- from_port: 10050
- to_port: 10050
- cidr_ip: 10.0.0.0/8
- - proto: udp
- from_port: 10051
- to_port: 10051
- group_id: sg-12345678
- - proto: icmp
- from_port: 8 # icmp type, -1 = any type
- to_port: -1 # icmp subtype, -1 = any subtype
- cidr_ip: 10.0.0.0/8
- - proto: all
- # the containing group name may be specified here
- group_name: example
- - proto: all
- # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6),
- # traffic on all ports is allowed, regardless of any ports you specify
- from_port: 10050 # this value is ignored
- to_port: 10050 # this value is ignored
- cidr_ip: 10.0.0.0/8
-
- rules_egress:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- cidr_ipv6: 64:ff9b::/96
- group_name: example-other
- # description to use if example-other needs to be created
- group_desc: other example EC2 group
-
-- name: example2 ec2 group
- ec2_group:
- name: example2
- description: an example2 EC2 group
- vpc_id: 12345
- region: eu-west-1
- rules:
- # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
- - proto: tcp
- ports: 22
- group_name: example-vpn
- - proto: tcp
- ports:
- - 80
- - 443
- - 8080-8099
- cidr_ip: 0.0.0.0/0
- # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
- - proto: tcp
- ports:
- - 6379
- - 26379
- group_name:
- - example-vpn
- - example-redis
- - proto: tcp
- ports: 5665
- group_name: example-vpn
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- cidr_ipv6:
- - 2607:F8B0::/32
- - 64:ff9b::/96
- group_id:
- - sg-edcd9784
- diff: True
-
-- name: "Delete group by its id"
- ec2_group:
- region: eu-west-1
- group_id: sg-33b4ee5b
- state: absent
-'''
-
-RETURN = '''
-group_name:
- description: Security group name
- sample: My Security Group
- type: str
- returned: on create/update
-group_id:
- description: Security group id
- sample: sg-abcd1234
- type: str
- returned: on create/update
-description:
- description: Description of security group
- sample: My Security Group
- type: str
- returned: on create/update
-tags:
- description: Tags associated with the security group
- sample:
- Name: My Security Group
- Purpose: protecting stuff
- type: dict
- returned: on create/update
-vpc_id:
- description: ID of VPC to which the security group belongs
- sample: vpc-abcd1234
- type: str
- returned: on create/update
-ip_permissions:
- description: Inbound rules associated with the security group.
- sample:
- - from_port: 8182
- ip_protocol: tcp
- ip_ranges:
- - cidr_ip: "1.1.1.1/32"
- ipv6_ranges: []
- prefix_list_ids: []
- to_port: 8182
- user_id_group_pairs: []
- type: list
- returned: on create/update
-ip_permissions_egress:
- description: Outbound rules associated with the security group.
- sample:
- - ip_protocol: -1
- ip_ranges:
- - cidr_ip: "0.0.0.0/0"
- ipv6_ranges: []
- prefix_list_ids: []
- user_id_group_pairs: []
- type: list
- returned: on create/update
-owner_id:
- description: AWS Account ID of the security group
- sample: 123456789012
- type: int
- returned: on create/update
-'''
-
-import json
-import re
-import itertools
-from copy import deepcopy
-from time import sleep
-from collections import namedtuple
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.aws.iam import get_aws_account_id
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
-from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet
-from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network
-from ansible.module_utils._text import to_text
-from ansible.module_utils.six import string_types
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description'])
-valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix'])
-current_account_id = None
-
-
-def rule_cmp(a, b):
- """Compare rules without descriptions"""
- for prop in ['port_range', 'protocol', 'target', 'target_type']:
- if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol):
- # equal protocols can interchange `(-1, -1)` and `(None, None)`
- if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)):
- continue
- elif getattr(a, prop) != getattr(b, prop):
- return False
- elif getattr(a, prop) != getattr(b, prop):
- return False
- return True
-
-
-def rules_to_permissions(rules):
- return [to_permission(rule) for rule in rules]
-
-
-def to_permission(rule):
- # take a Rule, output the serialized grant
- perm = {
- 'IpProtocol': rule.protocol,
- }
- perm['FromPort'], perm['ToPort'] = rule.port_range
- if rule.target_type == 'ipv4':
- perm['IpRanges'] = [{
- 'CidrIp': rule.target,
- }]
- if rule.description:
- perm['IpRanges'][0]['Description'] = rule.description
- elif rule.target_type == 'ipv6':
- perm['Ipv6Ranges'] = [{
- 'CidrIpv6': rule.target,
- }]
- if rule.description:
- perm['Ipv6Ranges'][0]['Description'] = rule.description
- elif rule.target_type == 'group':
- if isinstance(rule.target, tuple):
- pair = {}
- if rule.target[0]:
- pair['UserId'] = rule.target[0]
- # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
- if rule.target[1]:
- pair['GroupId'] = rule.target[1]
- elif rule.target[2]:
- pair['GroupName'] = rule.target[2]
- perm['UserIdGroupPairs'] = [pair]
- else:
- perm['UserIdGroupPairs'] = [{
- 'GroupId': rule.target
- }]
- if rule.description:
- perm['UserIdGroupPairs'][0]['Description'] = rule.description
- elif rule.target_type == 'ip_prefix':
- perm['PrefixListIds'] = [{
- 'PrefixListId': rule.target,
- }]
- if rule.description:
- perm['PrefixListIds'][0]['Description'] = rule.description
- elif rule.target_type not in valid_targets:
- raise ValueError('Invalid target type for rule {0}'.format(rule))
- return fix_port_and_protocol(perm)
-
-
-def rule_from_group_permission(perm):
- def ports_from_permission(p):
- if 'FromPort' not in p and 'ToPort' not in p:
- return (None, None)
- return (int(perm['FromPort']), int(perm['ToPort']))
-
- # outputs a rule tuple
- for target_key, target_subkey, target_type in [
- ('IpRanges', 'CidrIp', 'ipv4'),
- ('Ipv6Ranges', 'CidrIpv6', 'ipv6'),
- ('PrefixListIds', 'PrefixListId', 'ip_prefix'),
- ]:
- if target_key not in perm:
- continue
- for r in perm[target_key]:
- # there may be several IP ranges here, which is ok
- yield Rule(
- ports_from_permission(perm),
- to_text(perm['IpProtocol']),
- r[target_subkey],
- target_type,
- r.get('Description')
- )
- if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']:
- for pair in perm['UserIdGroupPairs']:
- target = (
- pair.get('UserId', None),
- pair.get('GroupId', None),
- pair.get('GroupName', None),
- )
- if pair.get('UserId', '').startswith('amazon-'):
- # amazon-elb and amazon-prefix rules don't need
- # group-id specified, so remove it when querying
- # from permission
- target = (
- target[0],
- None,
- target[2],
- )
- elif 'VpcPeeringConnectionId' in pair or pair['UserId'] != current_account_id:
- target = (
- pair.get('UserId', None),
- pair.get('GroupId', None),
- pair.get('GroupName', None),
- )
-
- yield Rule(
- ports_from_permission(perm),
- to_text(perm['IpProtocol']),
- target,
- 'group',
- pair.get('Description')
- )
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['InvalidGroup.NotFound'])
-def get_security_groups_with_backoff(connection, **kwargs):
- return connection.describe_security_groups(**kwargs)
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def sg_exists_with_backoff(connection, **kwargs):
- try:
- return connection.describe_security_groups(**kwargs)
- except is_boto3_error_code('InvalidGroup.NotFound'):
- return {'SecurityGroups': []}
-
-
-def deduplicate_rules_args(rules):
- """Returns unique rules"""
- if rules is None:
- return None
- return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
-
-
-def validate_rule(module, rule):
- VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix',
- 'group_id', 'group_name', 'group_desc',
- 'proto', 'from_port', 'to_port', 'rule_desc')
- if not isinstance(rule, dict):
- module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
- for k in rule:
- if k not in VALID_PARAMS:
- module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule))
-
- if 'group_id' in rule and 'cidr_ip' in rule:
- module.fail_json(msg='Specify group_id OR cidr_ip, not both')
- elif 'group_name' in rule and 'cidr_ip' in rule:
- module.fail_json(msg='Specify group_name OR cidr_ip, not both')
- elif 'group_id' in rule and 'cidr_ipv6' in rule:
- module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
- elif 'group_name' in rule and 'cidr_ipv6' in rule:
- module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
- elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
- module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
- elif 'group_id' in rule and 'group_name' in rule:
- module.fail_json(msg='Specify group_id OR group_name, not both')
-
-
-def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
- """
- Returns tuple of (target_type, target, group_created) after validating rule params.
-
- rule: Dict describing a rule.
- name: Name of the security group being managed.
- groups: Dict of all available security groups.
-
- AWS accepts an ip range or a security group as target of a rule. This
- function validate the rule specification and return either a non-None
- group_id or a non-None ip range.
- """
- FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
- group_id = None
- group_name = None
- target_group_created = False
-
- validate_rule(module, rule)
- if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
- # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
- owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
- group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
- groups[group_id] = group_instance
- groups[group_name] = group_instance
- # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
- if group_id and group_name:
- group_name = None
- return 'group', (owner_id, group_id, group_name), False
- elif 'group_id' in rule:
- return 'group', rule['group_id'], False
- elif 'group_name' in rule:
- group_name = rule['group_name']
- if group_name == name:
- group_id = group['GroupId']
- groups[group_id] = group
- groups[group_name] = group
- elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
- # both are VPC groups, this is ok
- group_id = groups[group_name]['GroupId']
- elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
- # both are EC2 classic, this is ok
- group_id = groups[group_name]['GroupId']
- else:
- auto_group = None
- filters = {'group-name': group_name}
- if vpc_id:
- filters['vpc-id'] = vpc_id
- # if we got here, either the target group does not exist, or there
- # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
- # is bad, so we have to create a new SG because no compatible group
- # exists
- if not rule.get('group_desc', '').strip():
- # retry describing the group once
- try:
- auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
- except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
- module.fail_json(msg="group %s will be automatically created by rule %s but "
- "no description was provided" % (group_name, rule))
- except ClientError as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e)
- elif not module.check_mode:
- params = dict(GroupName=group_name, Description=rule['group_desc'])
- if vpc_id:
- params['VpcId'] = vpc_id
- try:
- auto_group = client.create_security_group(**params)
- get_waiter(
- client, 'security_group_exists',
- ).wait(
- GroupIds=[auto_group['GroupId']],
- )
- except is_boto3_error_code('InvalidGroup.Duplicate'):
- # The group exists, but didn't show up in any of our describe-security-groups calls
- # Try searching on a filter for the name, and allow a retry window for AWS to update
- # the model on their end.
- try:
- auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
- except IndexError as e:
- module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
- except ClientError as e:
- module.fail_json_aws(
- e,
- msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
- if auto_group is not None:
- group_id = auto_group['GroupId']
- groups[group_id] = auto_group
- groups[group_name] = auto_group
- target_group_created = True
- return 'group', group_id, target_group_created
- elif 'cidr_ip' in rule:
- return 'ipv4', validate_ip(module, rule['cidr_ip']), False
- elif 'cidr_ipv6' in rule:
- return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
- elif 'ip_prefix' in rule:
- return 'ip_prefix', rule['ip_prefix'], False
-
- module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
-
-
-def ports_expand(ports):
- # takes a list of ports and returns a list of (port_from, port_to)
- ports_expanded = []
- for port in ports:
- if not isinstance(port, string_types):
- ports_expanded.append((port,) * 2)
- elif '-' in port:
- ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1)))
- else:
- ports_expanded.append((int(port.strip()),) * 2)
-
- return ports_expanded
-
-
-def rule_expand_ports(rule):
- # takes a rule dict and returns a list of expanded rule dicts
- if 'ports' not in rule:
- if isinstance(rule.get('from_port'), string_types):
- rule['from_port'] = int(rule.get('from_port'))
- if isinstance(rule.get('to_port'), string_types):
- rule['to_port'] = int(rule.get('to_port'))
- return [rule]
-
- ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
-
- rule_expanded = []
- for from_to in ports_expand(ports):
- temp_rule = rule.copy()
- del temp_rule['ports']
- temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to)
- rule_expanded.append(temp_rule)
-
- return rule_expanded
-
-
-def rules_expand_ports(rules):
- # takes a list of rules and expands it based on 'ports'
- if not rules:
- return rules
-
- return [rule for rule_complex in rules
- for rule in rule_expand_ports(rule_complex)]
-
-
-def rule_expand_source(rule, source_type):
- # takes a rule dict and returns a list of expanded rule dicts for specified source_type
- sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
- source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
-
- rule_expanded = []
- for source in sources:
- temp_rule = rule.copy()
- for s in source_types_all:
- temp_rule.pop(s, None)
- temp_rule[source_type] = source
- rule_expanded.append(temp_rule)
-
- return rule_expanded
-
-
-def rule_expand_sources(rule):
- # takes a rule dict and returns a list of expanded rule discts
- source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule)
-
- return [r for stype in source_types
- for r in rule_expand_source(rule, stype)]
-
-
-def rules_expand_sources(rules):
- # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
- if not rules:
- return rules
-
- return [rule for rule_complex in rules
- for rule in rule_expand_sources(rule_complex)]
-
-
-def update_rules_description(module, client, rule_type, group_id, ip_permissions):
- if module.check_mode:
- return
- try:
- if rule_type == "in":
- client.update_security_group_rule_descriptions_ingress(GroupId=group_id, IpPermissions=ip_permissions)
- if rule_type == "out":
- client.update_security_group_rule_descriptions_egress(GroupId=group_id, IpPermissions=ip_permissions)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
-
-
-def fix_port_and_protocol(permission):
- for key in ('FromPort', 'ToPort'):
- if key in permission:
- if permission[key] is None:
- del permission[key]
- else:
- permission[key] = int(permission[key])
-
- permission['IpProtocol'] = to_text(permission['IpProtocol'])
-
- return permission
-
-
-def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
- if revoke_ingress:
- revoke(client, module, revoke_ingress, group_id, 'in')
- if revoke_egress:
- revoke(client, module, revoke_egress, group_id, 'out')
- return bool(revoke_ingress or revoke_egress)
-
-
-def revoke(client, module, ip_permissions, group_id, rule_type):
- if not module.check_mode:
- try:
- if rule_type == 'in':
- client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
- elif rule_type == 'out':
- client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
- except (BotoCoreError, ClientError) as e:
- rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
- module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions))
-
-
-def add_new_permissions(client, module, new_ingress, new_egress, group_id):
- if new_ingress:
- authorize(client, module, new_ingress, group_id, 'in')
- if new_egress:
- authorize(client, module, new_egress, group_id, 'out')
- return bool(new_ingress or new_egress)
-
-
-def authorize(client, module, ip_permissions, group_id, rule_type):
- if not module.check_mode:
- try:
- if rule_type == 'in':
- client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
- elif rule_type == 'out':
- client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
- except (BotoCoreError, ClientError) as e:
- rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
- module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions))
-
-
-def validate_ip(module, cidr_ip):
- split_addr = cidr_ip.split('/')
- if len(split_addr) == 2:
- # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set
- # Get the network bits if IPv4, and validate if IPv6.
- try:
- ip = to_subnet(split_addr[0], split_addr[1])
- if ip != cidr_ip:
- module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
- "check the network mask and make sure that only network bits are set: {1}.".format(
- cidr_ip, ip))
- except ValueError:
- # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
- try:
- isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
- ip = cidr_ip
- except ValueError:
- # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
- # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
- ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
- if ip6 != cidr_ip:
- module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
- "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
- return ip6
- return ip
- return cidr_ip
-
-
-def update_tags(client, module, group_id, current_tags, tags, purge_tags):
- tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
-
- if not module.check_mode:
- if tags_to_delete:
- try:
- client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
-
- # Add/update tags
- if tags_need_modify:
- try:
- client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
- except (BotoCoreError, ClientError) as e:
- module.fail_json(e, msg="Unable to add tags {0}".format(tags_need_modify))
-
- return bool(tags_need_modify or tags_to_delete)
-
-
-def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
- changed = False
- client = module.client('ec2')
- ingress_needs_desc_update = []
- egress_needs_desc_update = []
-
- for present_rule in present_egress:
- needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
- for r in needs_update:
- named_tuple_egress_list.remove(r)
- egress_needs_desc_update.extend(needs_update)
- for present_rule in present_ingress:
- needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
- for r in needs_update:
- named_tuple_ingress_list.remove(r)
- ingress_needs_desc_update.extend(needs_update)
-
- if ingress_needs_desc_update:
- update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
- changed |= True
- if egress_needs_desc_update:
- update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
- changed |= True
- return changed
-
-
-def create_security_group(client, module, name, description, vpc_id):
- if not module.check_mode:
- params = dict(GroupName=name, Description=description)
- if vpc_id:
- params['VpcId'] = vpc_id
- try:
- group = client.create_security_group(**params)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to create security group")
- # When a group is created, an egress_rule ALLOW ALL
- # to 0.0.0.0/0 is added automatically but it's not
- # reflected in the object returned by the AWS API
- # call. We re-read the group for getting an updated object
- # amazon sometimes takes a couple seconds to update the security group so wait till it exists
- while True:
- sleep(3)
- group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
- if group.get('VpcId') and not group.get('IpPermissionsEgress'):
- pass
- else:
- break
- return group
- return None
-
-
-def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
- group_id = group['GroupId']
- tries = 6
-
- def await_rules(group, desired_rules, purge, rule_key):
- for i in range(tries):
- current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
- if purge and len(current_rules ^ set(desired_rules)) == 0:
- return group
- elif purge:
- conflicts = current_rules ^ set(desired_rules)
- # For cases where set comparison is equivalent, but invalid port/proto exist
- for a, b in itertools.combinations(conflicts, 2):
- if rule_cmp(a, b):
- conflicts.discard(a)
- conflicts.discard(b)
- if not len(conflicts):
- return group
- elif current_rules.issuperset(desired_rules) and not purge:
- return group
- sleep(10)
- group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
- module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
- return group
-
- group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
- if 'VpcId' in group and module.params.get('rules_egress') is not None:
- group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
- return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
-
-
-def group_exists(client, module, vpc_id, group_id, name):
- params = {'Filters': []}
- if group_id:
- params['GroupIds'] = [group_id]
- if name:
- # Add name to filters rather than params['GroupNames']
- # because params['GroupNames'] only checks the default vpc if no vpc is provided
- params['Filters'].append({'Name': 'group-name', 'Values': [name]})
- if vpc_id:
- params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
- # Don't filter by description to maintain backwards compatibility
-
- try:
- security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
- all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
- except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Error in describe_security_groups")
-
- if security_groups:
- groups = dict((group['GroupId'], group) for group in all_groups)
- groups.update(dict((group['GroupName'], group) for group in all_groups))
- if vpc_id:
- vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
- groups.update(vpc_wins)
- # maintain backwards compatibility by using the last matching group
- return security_groups[-1], groups
- return None, {}
-
-
-def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
- if not hasattr(client, "update_security_group_rule_descriptions_egress"):
- all_rules = rules if rules else [] + rules_egress if rules_egress else []
- if any('rule_desc' in rule for rule in all_rules):
- module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
-
-
-def get_diff_final_resource(client, module, security_group):
- def get_account_id(security_group, module):
- try:
- owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account'])
- except (BotoCoreError, ClientError) as e:
- owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
- return owner_id
-
- def get_final_tags(security_group_tags, specified_tags, purge_tags):
- if specified_tags is None:
- return security_group_tags
- tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags)
- end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete)
- end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete))
- end_result_tags.update(tags_need_modify)
- return end_result_tags
-
- def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules):
- if specified_rules is None:
- return security_group_rules
- if purge_rules:
- final_rules = []
- else:
- final_rules = list(security_group_rules)
- specified_rules = flatten_nested_targets(module, deepcopy(specified_rules))
- for rule in specified_rules:
- format_rule = {
- 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'),
- 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': []
- }
- if rule.get('proto', 'tcp') in ('all', '-1', -1):
- format_rule['ip_protocol'] = '-1'
- format_rule.pop('from_port')
- format_rule.pop('to_port')
- elif rule.get('ports'):
- if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)):
- rule['ports'] = [rule['ports']]
- for port in rule.get('ports'):
- if isinstance(port, string_types) and '-' in port:
- format_rule['from_port'], format_rule['to_port'] = port.split('-')
- else:
- format_rule['from_port'] = format_rule['to_port'] = port
- elif rule.get('from_port') or rule.get('to_port'):
- format_rule['from_port'] = rule.get('from_port', rule.get('to_port'))
- format_rule['to_port'] = rule.get('to_port', rule.get('from_port'))
- for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'):
- if rule.get(source_type):
- rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type)
- if rule.get('rule_desc'):
- format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}]
- else:
- if not isinstance(rule[source_type], list):
- rule[source_type] = [rule[source_type]]
- format_rule[rule_key] = [{source_type: target} for target in rule[source_type]]
- if rule.get('group_id') or rule.get('group_name'):
- rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0])
- format_rule['user_id_group_pairs'] = [{
- 'description': rule_sg.get('description', rule_sg.get('group_desc')),
- 'group_id': rule_sg.get('group_id', rule.get('group_id')),
- 'group_name': rule_sg.get('group_name', rule.get('group_name')),
- 'peering_status': rule_sg.get('peering_status'),
- 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)),
- 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']),
- 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id')
- }]
- for k, v in list(format_rule['user_id_group_pairs'][0].items()):
- if v is None:
- format_rule['user_id_group_pairs'][0].pop(k)
- final_rules.append(format_rule)
- # Order final rules consistently
- final_rules.sort(key=get_ip_permissions_sort_key)
- return final_rules
- security_group_ingress = security_group.get('ip_permissions', [])
- specified_ingress = module.params['rules']
- purge_ingress = module.params['purge_rules']
- security_group_egress = security_group.get('ip_permissions_egress', [])
- specified_egress = module.params['rules_egress']
- purge_egress = module.params['purge_rules_egress']
- return {
- 'description': module.params['description'],
- 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'),
- 'group_name': security_group.get('group_name', module.params['name']),
- 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress),
- 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress),
- 'owner_id': get_account_id(security_group, module),
- 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']),
- 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])}
-
-
-def flatten_nested_targets(module, rules):
- def _flatten(targets):
- for target in targets:
- if isinstance(target, list):
- for t in _flatten(target):
- yield t
- elif isinstance(target, string_types):
- yield target
-
- if rules is not None:
- for rule in rules:
- target_list_type = None
- if isinstance(rule.get('cidr_ip'), list):
- target_list_type = 'cidr_ip'
- elif isinstance(rule.get('cidr_ipv6'), list):
- target_list_type = 'cidr_ipv6'
- if target_list_type is not None:
- rule[target_list_type] = list(_flatten(rule[target_list_type]))
- return rules
-
-
-def get_rule_sort_key(dicts):
- if dicts.get('cidr_ip'):
- return dicts.get('cidr_ip')
- elif dicts.get('cidr_ipv6'):
- return dicts.get('cidr_ipv6')
- elif dicts.get('prefix_list_id'):
- return dicts.get('prefix_list_id')
- elif dicts.get('group_id'):
- return dicts.get('group_id')
- return None
-
-
-def get_ip_permissions_sort_key(rule):
- if rule.get('ip_ranges'):
- rule.get('ip_ranges').sort(key=get_rule_sort_key)
- return rule.get('ip_ranges')[0]['cidr_ip']
- elif rule.get('ipv6_ranges'):
- rule.get('ipv6_ranges').sort(key=get_rule_sort_key)
- return rule.get('ipv6_ranges')[0]['cidr_ipv6']
- elif rule.get('prefix_list_ids'):
- rule.get('prefix_list_ids').sort(key=get_rule_sort_key)
- return rule.get('prefix_list_ids')[0]['prefix_list_id']
- elif rule.get('user_id_group_pairs'):
- rule.get('user_id_group_pairs').sort(key=get_rule_sort_key)
- return rule.get('user_id_group_pairs')[0]['group_id']
- return None
-
-
-def main():
- argument_spec = dict(
- name=dict(),
- group_id=dict(),
- description=dict(),
- vpc_id=dict(),
- rules=dict(type='list'),
- rules_egress=dict(type='list'),
- state=dict(default='present', type='str', choices=['present', 'absent']),
- purge_rules=dict(default=True, required=False, type='bool'),
- purge_rules_egress=dict(default=True, required=False, type='bool'),
- tags=dict(required=False, type='dict', aliases=['resource_tags']),
- purge_tags=dict(default=True, required=False, type='bool')
- )
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=[['name', 'group_id']],
- required_if=[['state', 'present', ['name']]],
- )
-
- name = module.params['name']
- group_id = module.params['group_id']
- description = module.params['description']
- vpc_id = module.params['vpc_id']
- rules = flatten_nested_targets(module, deepcopy(module.params['rules']))
- rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress']))
- rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules)))
- rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress)))
- state = module.params.get('state')
- purge_rules = module.params['purge_rules']
- purge_rules_egress = module.params['purge_rules_egress']
- tags = module.params['tags']
- purge_tags = module.params['purge_tags']
-
- if state == 'present' and not description:
- module.fail_json(msg='Must provide description when state is present.')
-
- changed = False
- client = module.client('ec2')
-
- verify_rules_with_descriptions_permitted(client, module, rules, rules_egress)
- group, groups = group_exists(client, module, vpc_id, group_id, name)
- group_created_new = not bool(group)
-
- global current_account_id
- current_account_id = get_aws_account_id(module)
-
- before = {}
- after = {}
-
- # Ensure requested group is absent
- if state == 'absent':
- if group:
- # found a match, delete it
- before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
- before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
- try:
- if not module.check_mode:
- client.delete_security_group(GroupId=group['GroupId'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group)
- else:
- group = None
- changed = True
- else:
- # no match found, no changes required
- pass
-
- # Ensure requested group is present
- elif state == 'present':
- if group:
- # existing group
- before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
- before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
- if group['Description'] != description:
- module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
- "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
- else:
- # no match found, create it
- group = create_security_group(client, module, name, description, vpc_id)
- changed = True
-
- if tags is not None and group is not None:
- current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
- changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
-
- if group:
- named_tuple_ingress_list = []
- named_tuple_egress_list = []
- current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
- current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
-
- for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
- (rules_egress, 'out', named_tuple_egress_list)]:
- if new_rules is None:
- continue
- for rule in new_rules:
- target_type, target, target_group_created = get_target_from_rule(
- module, client, rule, name, group, groups, vpc_id)
- changed |= target_group_created
-
- if rule.get('proto', 'tcp') in ('all', '-1', -1):
- rule['proto'] = '-1'
- rule['from_port'] = None
- rule['to_port'] = None
- try:
- int(rule.get('proto', 'tcp'))
- rule['proto'] = to_text(rule.get('proto', 'tcp'))
- rule['from_port'] = None
- rule['to_port'] = None
- except ValueError:
- # rule does not use numeric protocol spec
- pass
-
- named_tuple_rule_list.append(
- Rule(
- port_range=(rule['from_port'], rule['to_port']),
- protocol=to_text(rule.get('proto', 'tcp')),
- target=target, target_type=target_type,
- description=rule.get('rule_desc'),
- )
- )
-
- # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
- new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))]
- new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))]
-
- if module.params.get('rules_egress') is None and 'VpcId' in group:
- # when no egress rules are specified and we're in a VPC,
- # we add in a default allow all out rule, which was the
- # default behavior before egress rules were added
- rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
- if rule in current_egress:
- named_tuple_egress_list.append(rule)
- if rule not in current_egress:
- current_egress.append(rule)
-
- # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
- present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
- present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
-
- if purge_rules:
- revoke_ingress = []
- for p in present_ingress:
- if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
- revoke_ingress.append(to_permission(p))
- else:
- revoke_ingress = []
- if purge_rules_egress and module.params.get('rules_egress') is not None:
- if module.params.get('rules_egress') is []:
- revoke_egress = [
- to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
- if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
- ]
- else:
- revoke_egress = []
- for p in present_egress:
- if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
- revoke_egress.append(to_permission(p))
- else:
- revoke_egress = []
-
- # named_tuple_ingress_list and named_tuple_egress_list got updated by
- # method update_rule_descriptions, deep copy these two lists to new
- # variables for the record of the 'desired' ingress and egress sg permissions
- desired_ingress = deepcopy(named_tuple_ingress_list)
- desired_egress = deepcopy(named_tuple_egress_list)
-
- changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list)
-
- # Revoke old rules
- changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
- rule_msg = 'Revoking {0}, and egress {1}'.format(revoke_ingress, revoke_egress)
-
- new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))]
- new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
- new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
- # Authorize new rules
- changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
-
- if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
- # A new group with no rules provided is already being awaited.
- # When it is created we wait for the default egress rule to be added by AWS
- security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
- elif changed and not module.check_mode:
- # keep pulling until current security group rules match the desired ingress and egress rules
- security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
- else:
- security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
- security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
- security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
-
- else:
- security_group = {'group_id': None}
-
- if module._diff:
- if module.params['state'] == 'present':
- after = get_diff_final_resource(client, module, security_group)
- if before.get('ip_permissions'):
- before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
-
- security_group['diff'] = [{'before': before, 'after': after}]
-
- module.exit_json(changed=changed, **security_group)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_net.py b/test/support/integration/plugins/modules/ec2_vpc_net.py
deleted file mode 100644
index 30e4b1e94c..0000000000
--- a/test/support/integration/plugins/modules/ec2_vpc_net.py
+++ /dev/null
@@ -1,524 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_net
-short_description: Configure AWS virtual private clouds
-description:
- - Create, modify, and terminate AWS virtual private clouds.
-version_added: "2.0"
-author:
- - Jonathan Davila (@defionscode)
- - Sloane Hertel (@s-hertel)
-options:
- name:
- description:
- - The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
- required: yes
- type: str
- cidr_block:
- description:
- - The primary CIDR of the VPC. After 2.5 a list of CIDRs can be provided. The first in the list will be used as the primary CIDR
- and is used in conjunction with the C(name) to ensure idempotence.
- required: yes
- type: list
- elements: str
- ipv6_cidr:
- description:
- - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses,
- or the size of the CIDR block.
- default: False
- type: bool
- version_added: '2.10'
- purge_cidrs:
- description:
- - Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block).
- default: no
- type: bool
- version_added: '2.5'
- tenancy:
- description:
- - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
- default: default
- choices: [ 'default', 'dedicated' ]
- type: str
- dns_support:
- description:
- - Whether to enable AWS DNS support.
- default: yes
- type: bool
- dns_hostnames:
- description:
- - Whether to enable AWS hostname support.
- default: yes
- type: bool
- dhcp_opts_id:
- description:
- - The id of the DHCP options to use for this VPC.
- type: str
- tags:
- description:
- - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of
- the VPC if it's different.
- aliases: [ 'resource_tags' ]
- type: dict
- state:
- description:
- - The state of the VPC. Either absent or present.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- multi_ok:
- description:
- - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
- duplicate VPCs created.
- type: bool
- default: false
-requirements:
- - boto3
- - botocore
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: create a VPC with dedicated tenancy and a couple of tags
- ec2_vpc_net:
- name: Module_dev2
- cidr_block: 10.10.0.0/16
- region: us-east-1
- tags:
- module: ec2_vpc_net
- this: works
- tenancy: dedicated
-
-- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
- ec2_vpc_net:
- name: Module_dev2
- cidr_block: 10.10.0.0/16
- ipv6_cidr: True
- region: us-east-1
- tenancy: dedicated
-'''
-
-RETURN = '''
-vpc:
- description: info about the VPC that was created or deleted
- returned: always
- type: complex
- contains:
- cidr_block:
- description: The CIDR of the VPC
- returned: always
- type: str
- sample: 10.0.0.0/16
- cidr_block_association_set:
- description: IPv4 CIDR blocks associated with the VPC
- returned: success
- type: list
- sample:
- "cidr_block_association_set": [
- {
- "association_id": "vpc-cidr-assoc-97aeeefd",
- "cidr_block": "20.0.0.0/24",
- "cidr_block_state": {
- "state": "associated"
- }
- }
- ]
- classic_link_enabled:
- description: indicates whether ClassicLink is enabled
- returned: always
- type: bool
- sample: false
- dhcp_options_id:
- description: the id of the DHCP options associated with this VPC
- returned: always
- type: str
- sample: dopt-0fb8bd6b
- id:
- description: VPC resource id
- returned: always
- type: str
- sample: vpc-c2e00da5
- instance_tenancy:
- description: indicates whether VPC uses default or dedicated tenancy
- returned: always
- type: str
- sample: default
- ipv6_cidr_block_association_set:
- description: IPv6 CIDR blocks associated with the VPC
- returned: success
- type: list
- sample:
- "ipv6_cidr_block_association_set": [
- {
- "association_id": "vpc-cidr-assoc-97aeeefd",
- "ipv6_cidr_block": "2001:db8::/56",
- "ipv6_cidr_block_state": {
- "state": "associated"
- }
- }
- ]
- is_default:
- description: indicates whether this is the default VPC
- returned: always
- type: bool
- sample: false
- state:
- description: state of the VPC
- returned: always
- type: str
- sample: available
- tags:
- description: tags attached to the VPC, includes name
- returned: always
- type: complex
- contains:
- Name:
- description: name tag for the VPC
- returned: always
- type: str
- sample: pk_vpc4
-'''
-
-try:
- import botocore
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from time import sleep, time
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags,
- ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
-from ansible.module_utils.six import string_types
-from ansible.module_utils._text import to_native
-from ansible.module_utils.network.common.utils import to_subnet
-
-
-def vpc_exists(module, vpc, name, cidr_block, multi):
- """Returns None or a vpc object depending on the existence of a VPC. When supplied
- with a CIDR, it will check for matching tags to determine if it is a match
- otherwise it will assume the VPC does not exist and thus return None.
- """
- try:
- matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': cidr_block}])['Vpcs']
- # If an exact matching using a list of CIDRs isn't found, check for a match with the first CIDR as is documented for C(cidr_block)
- if not matching_vpcs:
- matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': [cidr_block[0]]}])['Vpcs']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe VPCs")
-
- if multi:
- return None
- elif len(matching_vpcs) == 1:
- return matching_vpcs[0]['VpcId']
- elif len(matching_vpcs) > 1:
- module.fail_json(msg='Currently there are %d VPCs that have the same name and '
- 'CIDR block you specified. If you would like to create '
- 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
- return None
-
-
-@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound'])
-def get_classic_link_with_backoff(connection, vpc_id):
- try:
- return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled')
- except botocore.exceptions.ClientError as e:
- if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
- return False
- else:
- raise
-
-
-def get_vpc(module, connection, vpc_id):
- # wait for vpc to be available
- try:
- connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))
-
- try:
- vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe VPCs")
- try:
- vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe VPCs")
-
- return vpc_obj
-
-
-def update_vpc_tags(connection, module, vpc_id, tags, name):
- if tags is None:
- tags = dict()
-
- tags.update({'Name': name})
- tags = dict((k, to_native(v)) for k, v in tags.items())
- try:
- current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags'])
- tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
- if tags_to_update:
- if not module.check_mode:
- tags = ansible_dict_to_boto3_tag_list(tags_to_update)
- vpc_obj = connection.create_tags(Resources=[vpc_id], Tags=tags, aws_retry=True)
-
- # Wait for tags to be updated
- expected_tags = boto3_tag_list_to_ansible_dict(tags)
- filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
- connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)
-
- return True
- else:
- return False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to update tags")
-
-
-def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
- if vpc_obj['DhcpOptionsId'] != dhcp_id:
- if not module.check_mode:
- try:
- connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
-
- try:
- # Wait for DhcpOptionsId to be updated
- filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
- connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Failed to wait for DhcpOptionsId to be updated")
-
- return True
- else:
- return False
-
-
-def create_vpc(connection, module, cidr_block, tenancy):
- try:
- if not module.check_mode:
- vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy)
- else:
- module.exit_json(changed=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to create the VPC")
-
- # wait for vpc to exist
- try:
- connection.get_waiter('vpc_exists').wait(VpcIds=[vpc_obj['Vpc']['VpcId']])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId']))
-
- return vpc_obj['Vpc']['VpcId']
-
-
-def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
- start_time = time()
- updated = False
- while time() < start_time + 300:
- current_value = connection.describe_vpc_attribute(
- Attribute=attribute,
- VpcId=vpc_id
- )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
- if current_value != expected_value:
- sleep(3)
- else:
- updated = True
- break
- if not updated:
- module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
-
-
-def get_cidr_network_bits(module, cidr_block):
- fixed_cidrs = []
- for cidr in cidr_block:
- split_addr = cidr.split('/')
- if len(split_addr) == 2:
- # this_ip is a IPv4 CIDR that may or may not have host bits set
- # Get the network bits.
- valid_cidr = to_subnet(split_addr[0], split_addr[1])
- if cidr != valid_cidr:
- module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
- "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
- fixed_cidrs.append(valid_cidr)
- else:
- # let AWS handle invalid CIDRs
- fixed_cidrs.append(cidr)
- return fixed_cidrs
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- cidr_block=dict(type='list', required=True),
- ipv6_cidr=dict(type='bool', default=False),
- tenancy=dict(choices=['default', 'dedicated'], default='default'),
- dns_support=dict(type='bool', default=True),
- dns_hostnames=dict(type='bool', default=True),
- dhcp_opts_id=dict(),
- tags=dict(type='dict', aliases=['resource_tags']),
- state=dict(choices=['present', 'absent'], default='present'),
- multi_ok=dict(type='bool', default=False),
- purge_cidrs=dict(type='bool', default=False),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- name = module.params.get('name')
- cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
- ipv6_cidr = module.params.get('ipv6_cidr')
- purge_cidrs = module.params.get('purge_cidrs')
- tenancy = module.params.get('tenancy')
- dns_support = module.params.get('dns_support')
- dns_hostnames = module.params.get('dns_hostnames')
- dhcp_id = module.params.get('dhcp_opts_id')
- tags = module.params.get('tags')
- state = module.params.get('state')
- multi = module.params.get('multi_ok')
-
- changed = False
-
- connection = module.client(
- 'ec2',
- retry_decorator=AWSRetry.jittered_backoff(
- retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
- )
- )
-
- if dns_hostnames and not dns_support:
- module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
-
- if state == 'present':
-
- # Check if VPC exists
- vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
-
- if vpc_id is None:
- vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
- changed = True
-
- vpc_obj = get_vpc(module, connection, vpc_id)
-
- associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
- if cidr['CidrBlockState']['State'] != 'disassociated')
- to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
- to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
- expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add
-
- if len(cidr_block) > 1:
- for cidr in to_add:
- changed = True
- try:
- connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
- if ipv6_cidr:
- if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
- module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
- vpc_id,
- vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
- else:
- try:
- connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
-
- if purge_cidrs:
- for association_id in to_remove:
- changed = True
- try:
- connection.disassociate_vpc_cidr_block(AssociationId=association_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
- "are associated with the CIDR block before you can disassociate it.".format(association_id))
-
- if dhcp_id is not None:
- try:
- if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to update DHCP options")
-
- if tags is not None or name is not None:
- try:
- if update_vpc_tags(connection, module, vpc_id, tags, name):
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to update tags")
-
- current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
- current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
- if current_dns_enabled != dns_support:
- changed = True
- if not module.check_mode:
- try:
- connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to update enabled dns support attribute")
- if current_dns_hostnames != dns_hostnames:
- changed = True
- if not module.check_mode:
- try:
- connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")
-
- # wait for associated cidrs to match
- if to_add or to_remove:
- try:
- connection.get_waiter('vpc_available').wait(
- VpcIds=[vpc_id],
- Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to wait for CIDRs to update")
-
- # try to wait for enableDnsSupport and enableDnsHostnames to match
- wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
- wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
-
- final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
- final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
- final_state['id'] = final_state.pop('vpc_id')
-
- module.exit_json(changed=changed, vpc=final_state)
-
- elif state == 'absent':
-
- # Check if VPC exists
- vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
-
- if vpc_id is not None:
- try:
- if not module.check_mode:
- connection.delete_vpc(VpcId=vpc_id)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
- "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))
-
- module.exit_json(changed=changed, vpc={})
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_subnet.py b/test/support/integration/plugins/modules/ec2_vpc_subnet.py
deleted file mode 100644
index 5085e99b79..0000000000
--- a/test/support/integration/plugins/modules/ec2_vpc_subnet.py
+++ /dev/null
@@ -1,604 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_subnet
-short_description: Manage subnets in AWS virtual private clouds
-description:
- - Manage subnets in AWS virtual private clouds.
-version_added: "2.0"
-author:
-- Robert Estelle (@erydo)
-- Brad Davidson (@brandond)
-requirements: [ boto3 ]
-options:
- az:
- description:
- - "The availability zone for the subnet."
- type: str
- cidr:
- description:
- - "The CIDR block for the subnet. E.g. 192.0.2.0/24."
- type: str
- required: true
- ipv6_cidr:
- description:
- - "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range."
- - "Required if I(assign_instances_ipv6=true)"
- version_added: "2.5"
- type: str
- tags:
- description:
- - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
- aliases: [ 'resource_tags' ]
- type: dict
- state:
- description:
- - "Create or remove the subnet."
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- vpc_id:
- description:
- - "VPC ID of the VPC in which to create or delete the subnet."
- required: true
- type: str
- map_public:
- description:
- - "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default."
- type: bool
- default: 'no'
- version_added: "2.4"
- assign_instances_ipv6:
- description:
- - "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address."
- type: bool
- default: false
- version_added: "2.5"
- wait:
- description:
- - "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing."
- type: bool
- default: true
- version_added: "2.5"
- wait_timeout:
- description:
- - "Number of seconds to wait for subnet to become available I(wait=True)."
- default: 300
- version_added: "2.5"
- type: int
- purge_tags:
- description:
- - Whether or not to remove tags that do not appear in the I(tags) list.
- type: bool
- default: true
- version_added: "2.5"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create subnet for database servers
- ec2_vpc_subnet:
- state: present
- vpc_id: vpc-123456
- cidr: 10.0.1.16/28
- tags:
- Name: Database Subnet
- register: database_subnet
-
-- name: Remove subnet for database servers
- ec2_vpc_subnet:
- state: absent
- vpc_id: vpc-123456
- cidr: 10.0.1.16/28
-
-- name: Create subnet with IPv6 block assigned
- ec2_vpc_subnet:
- state: present
- vpc_id: vpc-123456
- cidr: 10.1.100.0/24
- ipv6_cidr: 2001:db8:0:102::/64
-
-- name: Remove IPv6 block assigned to subnet
- ec2_vpc_subnet:
- state: present
- vpc_id: vpc-123456
- cidr: 10.1.100.0/24
- ipv6_cidr: ''
-'''
-
-RETURN = '''
-subnet:
- description: Dictionary of subnet values
- returned: I(state=present)
- type: complex
- contains:
- id:
- description: Subnet resource id
- returned: I(state=present)
- type: str
- sample: subnet-b883b2c4
- cidr_block:
- description: The IPv4 CIDR of the Subnet
- returned: I(state=present)
- type: str
- sample: "10.0.0.0/16"
- ipv6_cidr_block:
- description: The IPv6 CIDR block actively associated with the Subnet
- returned: I(state=present)
- type: str
- sample: "2001:db8:0:102::/64"
- availability_zone:
- description: Availability zone of the Subnet
- returned: I(state=present)
- type: str
- sample: us-east-1a
- state:
- description: state of the Subnet
- returned: I(state=present)
- type: str
- sample: available
- tags:
- description: tags attached to the Subnet, includes name
- returned: I(state=present)
- type: dict
- sample: {"Name": "My Subnet", "env": "staging"}
- map_public_ip_on_launch:
- description: whether public IP is auto-assigned to new instances
- returned: I(state=present)
- type: bool
- sample: false
- assign_ipv6_address_on_creation:
- description: whether IPv6 address is auto-assigned to new instances
- returned: I(state=present)
- type: bool
- sample: false
- vpc_id:
- description: the id of the VPC where this Subnet exists
- returned: I(state=present)
- type: str
- sample: vpc-67236184
- available_ip_address_count:
- description: number of available IPv4 addresses
- returned: I(state=present)
- type: str
- sample: 251
- default_for_az:
- description: indicates whether this is the default Subnet for this Availability Zone
- returned: I(state=present)
- type: bool
- sample: false
- ipv6_association_id:
- description: The IPv6 association ID for the currently associated CIDR
- returned: I(state=present)
- type: str
- sample: subnet-cidr-assoc-b85c74d2
- ipv6_cidr_block_association_set:
- description: An array of IPv6 cidr block association set information.
- returned: I(state=present)
- type: complex
- contains:
- association_id:
- description: The association ID
- returned: always
- type: str
- ipv6_cidr_block:
- description: The IPv6 CIDR block that is associated with the subnet.
- returned: always
- type: str
- ipv6_cidr_block_state:
- description: A hash/dict that contains a single item. The state of the cidr block association.
- returned: always
- type: dict
- contains:
- state:
- description: The CIDR block association state.
- returned: always
- type: str
-'''
-
-
-import time
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list,
- camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, AWSRetry)
-
-
-def get_subnet_info(subnet):
- if 'Subnets' in subnet:
- return [get_subnet_info(s) for s in subnet['Subnets']]
- elif 'Subnet' in subnet:
- subnet = camel_dict_to_snake_dict(subnet['Subnet'])
- else:
- subnet = camel_dict_to_snake_dict(subnet)
-
- if 'tags' in subnet:
- subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
- else:
- subnet['tags'] = dict()
-
- if 'subnet_id' in subnet:
- subnet['id'] = subnet['subnet_id']
- del subnet['subnet_id']
-
- subnet['ipv6_cidr_block'] = ''
- subnet['ipv6_association_id'] = ''
- ipv6set = subnet.get('ipv6_cidr_block_association_set')
- if ipv6set:
- for item in ipv6set:
- if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
- subnet['ipv6_cidr_block'] = item['ipv6_cidr_block']
- subnet['ipv6_association_id'] = item['association_id']
-
- return subnet
-
-
-@AWSRetry.exponential_backoff()
-def describe_subnets_with_backoff(client, **params):
- return client.describe_subnets(**params)
-
-
-def waiter_params(module, params, start_time):
- if not module.botocore_at_least("1.7.0"):
- remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
- params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
- return params
-
-
-def handle_waiter(conn, module, waiter_name, params, start_time):
- try:
- get_waiter(conn, waiter_name).wait(
- **waiter_params(module, params, start_time)
- )
- except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, "Failed to wait for updates to complete")
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "An exception happened while trying to wait for updates")
-
-
-def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None):
- wait = module.params['wait']
- wait_timeout = module.params['wait_timeout']
-
- params = dict(VpcId=vpc_id,
- CidrBlock=cidr)
-
- if ipv6_cidr:
- params['Ipv6CidrBlock'] = ipv6_cidr
-
- if az:
- params['AvailabilityZone'] = az
-
- try:
- subnet = get_subnet_info(conn.create_subnet(**params))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create subnet")
-
- # Sometimes AWS takes its time to create a subnet and so using
- # new subnets's id to do things like create tags results in
- # exception.
- if wait and subnet.get('state') != 'available':
- handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
- try:
- conn.get_waiter('subnet_available').wait(
- **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time)
- )
- subnet['state'] = 'available'
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available")
-
- return subnet
-
-
-def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
- changed = False
-
- filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'})
- try:
- cur_tags = conn.describe_tags(Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't describe tags")
-
- to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
-
- if to_update:
- try:
- if not module.check_mode:
- AWSRetry.exponential_backoff(
- catch_extra_error_codes=['InvalidSubnetID.NotFound']
- )(conn.create_tags)(
- Resources=[subnet['id']],
- Tags=ansible_dict_to_boto3_tag_list(to_update)
- )
-
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create tags")
-
- if to_delete:
- try:
- if not module.check_mode:
- tags_list = []
- for key in to_delete:
- tags_list.append({'Key': key})
-
- AWSRetry.exponential_backoff(
- catch_extra_error_codes=['InvalidSubnetID.NotFound']
- )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
-
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete tags")
-
- if module.params['wait'] and not module.check_mode:
- # Wait for tags to be updated
- filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
- handle_waiter(conn, module, 'subnet_exists',
- {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
-
- return changed
-
-
-def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
- if check_mode:
- return
- try:
- conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
-
-
-def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
- if check_mode:
- return
- try:
- conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
-
-
-def disassociate_ipv6_cidr(conn, module, subnet, start_time):
- if subnet.get('assign_ipv6_address_on_creation'):
- ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
-
- try:
- conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}"
- .format(subnet['ipv6_association_id'], subnet['id']))
-
- # Wait for cidr block to be disassociated
- if module.params['wait']:
- filters = ansible_dict_to_boto3_filter_list(
- {'ipv6-cidr-block-association.state': ['disassociated'],
- 'vpc-id': subnet['vpc_id']}
- )
- handle_waiter(conn, module, 'subnet_exists',
- {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
-
-
-def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
- wait = module.params['wait']
- changed = False
-
- if subnet['ipv6_association_id'] and not ipv6_cidr:
- if not check_mode:
- disassociate_ipv6_cidr(conn, module, subnet, start_time)
- changed = True
-
- if ipv6_cidr:
- filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
- 'vpc-id': subnet['vpc_id']})
-
- try:
- check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get subnet info")
-
- if check_subnets and check_subnets[0]['ipv6_cidr_block']:
- module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
-
- if subnet['ipv6_association_id']:
- if not check_mode:
- disassociate_ipv6_cidr(conn, module, subnet, start_time)
- changed = True
-
- try:
- if not check_mode:
- associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
- else:
- if not check_mode and wait:
- filters = ansible_dict_to_boto3_filter_list(
- {'ipv6-cidr-block-association.state': ['associated'],
- 'vpc-id': subnet['vpc_id']}
- )
- handle_waiter(conn, module, 'subnet_exists',
- {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
-
- if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
- subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
- subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
- if subnet['ipv6_cidr_block_association_set']:
- subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
- else:
- subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
-
- return changed
-
-
-def get_matching_subnet(conn, module, vpc_id, cidr):
- filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
- try:
- subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get matching subnet")
-
- if subnets:
- return subnets[0]
-
- return None
-
-
-def ensure_subnet_present(conn, module):
- subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
- changed = False
-
- # Initialize start so max time does not exceed the specified wait_timeout for multiple operations
- start_time = time.time()
-
- if subnet is None:
- if not module.check_mode:
- subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
- ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time)
- changed = True
- # Subnet will be None when check_mode is true
- if subnet is None:
- return {
- 'changed': changed,
- 'subnet': {}
- }
- if module.params['wait']:
- handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
-
- if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'):
- if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time):
- changed = True
-
- if module.params['map_public'] != subnet['map_public_ip_on_launch']:
- ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time)
- changed = True
-
- if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'):
- ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time)
- changed = True
-
- if module.params['tags'] != subnet['tags']:
- stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items())
- if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time):
- changed = True
-
- subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
- if not module.check_mode and module.params['wait']:
- # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation
- # so we only wait for those if necessary just before returning the subnet
- subnet = ensure_final_subnet(conn, module, subnet, start_time)
-
- return {
- 'changed': changed,
- 'subnet': subnet
- }
-
-
-def ensure_final_subnet(conn, module, subnet, start_time):
- for rewait in range(0, 30):
- map_public_correct = False
- assign_ipv6_correct = False
-
- if module.params['map_public'] == subnet['map_public_ip_on_launch']:
- map_public_correct = True
- else:
- if module.params['map_public']:
- handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time)
- else:
- handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time)
-
- if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'):
- assign_ipv6_correct = True
- else:
- if module.params['assign_instances_ipv6']:
- handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
- else:
- handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
-
- if map_public_correct and assign_ipv6_correct:
- break
-
- time.sleep(5)
- subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
-
- return subnet
-
-
-def ensure_subnet_absent(conn, module):
- subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
- if subnet is None:
- return {'changed': False}
-
- try:
- if not module.check_mode:
- conn.delete_subnet(SubnetId=subnet['id'])
- if module.params['wait']:
- handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time())
- return {'changed': True}
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete subnet")
-
-
-def main():
- argument_spec = dict(
- az=dict(default=None, required=False),
- cidr=dict(required=True),
- ipv6_cidr=dict(default='', required=False),
- state=dict(default='present', choices=['present', 'absent']),
- tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
- vpc_id=dict(required=True),
- map_public=dict(default=False, required=False, type='bool'),
- assign_instances_ipv6=dict(default=False, required=False, type='bool'),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=300, required=False),
- purge_tags=dict(default=True, type='bool')
- )
-
- required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
-
- if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
- module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")
-
- if not module.botocore_at_least("1.7.0"):
- module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times")
-
- connection = module.client('ec2')
-
- state = module.params.get('state')
-
- try:
- if state == 'present':
- result = ensure_subnet_present(connection, module)
- elif state == 'absent':
- result = ensure_subnet_absent(connection, module)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/utils/shippable/incidental/aws.sh b/test/utils/shippable/incidental/aws.sh
deleted file mode 120000
index 700ad3edcf..0000000000
--- a/test/utils/shippable/incidental/aws.sh
+++ /dev/null
@@ -1 +0,0 @@
-cloud.sh \ No newline at end of file