author     Siva Popuri <popuri.siva@gmail.com>  2015-07-20 10:17:56 -0500
committer  Siva Popuri <popuri.siva@gmail.com>  2015-07-20 10:17:56 -0500
commit     0135e136115b4a717c19cf0a43b7a1c5b3cf453e (patch)
tree       9f7f5d249ce1c4e1069971e76abac15cbea0f1d9
parent     3e5fa95891b9c170301305423886cbd9a8075fa2 (diff)
parent     edbd715985fbe75b236fce2d368dc5c8cb8745eb (diff)
download   ansible-modules-extras-0135e136115b4a717c19cf0a43b7a1c5b3cf453e.tar.gz
merge changes from upstream
-rw-r--r--  .travis.yml | 1
-rw-r--r--  VERSION | 2
-rw-r--r--  cloud/amazon/GUIDELINES.md | 88
-rw-r--r--  cloud/amazon/cloudtrail.py | 2
-rw-r--r--  cloud/amazon/dynamodb_table.py | 286
-rw-r--r--  cloud/amazon/ec2_ami_copy.py | 208
-rw-r--r--  cloud/amazon/ec2_eni.py | 404
-rw-r--r--  cloud/amazon/ec2_eni_facts.py | 4
-rw-r--r--  cloud/amazon/ec2_vpc_igw.py | 159
-rw-r--r--  cloud/amazon/ec2_win_password.py | 80
-rw-r--r--  cloud/amazon/route53_zone.py | 164
-rw-r--r--  cloud/amazon/s3_logging.py | 184
-rw-r--r--  cloud/amazon/sts_assume_role.py | 154
-rw-r--r--  cloud/centurylink/__init__.py | 1
-rw-r--r--  cloud/centurylink/clc_publicip.py | 353
-rw-r--r--  cloud/cloudstack/cs_account.py | 6
-rw-r--r--  cloud/cloudstack/cs_affinitygroup.py | 6
-rw-r--r--  cloud/cloudstack/cs_facts.py | 221
-rw-r--r--  cloud/cloudstack/cs_firewall.py | 16
-rw-r--r--  cloud/cloudstack/cs_instance.py | 95
-rw-r--r--  cloud/cloudstack/cs_instancegroup.py | 6
-rw-r--r--  cloud/cloudstack/cs_iso.py | 6
-rw-r--r--  cloud/cloudstack/cs_network.py | 15
-rw-r--r--  cloud/cloudstack/cs_portforward.py | 30
-rw-r--r--  cloud/cloudstack/cs_project.py | 17
-rw-r--r--  cloud/cloudstack/cs_securitygroup.py | 6
-rw-r--r--  cloud/cloudstack/cs_securitygroup_rule.py | 16
-rw-r--r--  cloud/cloudstack/cs_sshkeypair.py | 6
-rw-r--r--  cloud/cloudstack/cs_staticnat.py | 316
-rw-r--r--  cloud/cloudstack/cs_template.py | 6
-rw-r--r--  cloud/cloudstack/cs_vmsnapshot.py | 6
-rw-r--r--  cloud/lxc/lxc_container.py | 4
-rw-r--r--  cloud/misc/virt.py | 10
-rw-r--r--  cloud/rackspace/__init__.py | 0
-rw-r--r--  cloud/rackspace/rax_clb_ssl.py | 269
-rw-r--r--  cloud/rackspace/rax_mon_alarm.py | 227
-rw-r--r--  cloud/rackspace/rax_mon_check.py | 313
-rw-r--r--  cloud/rackspace/rax_mon_entity.py | 192
-rw-r--r--  cloud/rackspace/rax_mon_notification.py | 176
-rw-r--r--  cloud/rackspace/rax_mon_notification_plan.py | 181
-rw-r--r--  cloud/vmware/vsphere_copy.py (renamed from cloud/vmware/vsphere_copy) | 15
-rw-r--r--  cloud/webfaction/webfaction_app.py | 29
-rw-r--r--  cloud/webfaction/webfaction_db.py | 30
-rw-r--r--  cloud/webfaction/webfaction_domain.py | 2
-rw-r--r--  cloud/webfaction/webfaction_mailbox.py | 2
-rw-r--r--  cloud/webfaction/webfaction_site.py | 2
-rw-r--r--  clustering/__init__.py | 0
-rw-r--r--  clustering/consul.py | 41
-rw-r--r--  clustering/consul_kv.py | 10
-rw-r--r--  database/misc/mongodb_user.py | 8
-rw-r--r--  files/patch.py | 21
-rw-r--r--  monitoring/airbrake_deployment.py | 5
-rw-r--r--  monitoring/librato_annotation.py | 7
-rw-r--r--  monitoring/newrelic_deployment.py | 5
-rw-r--r--  monitoring/pagerduty.py | 77
-rw-r--r--  monitoring/rollbar_deployment.py | 1
-rw-r--r--  monitoring/sensu_check.py | 336
-rw-r--r--  monitoring/zabbix_host.py | 49
-rw-r--r--  network/citrix/netscaler.py | 4
-rw-r--r--  network/dnsmadeeasy.py | 4
-rw-r--r--  network/haproxy.py | 14
-rw-r--r--  notification/flowdock.py | 5
-rw-r--r--  notification/grove.py | 2
-rwxr-xr-x  notification/hall.py | 22
-rw-r--r--  notification/hipchat.py | 40
-rw-r--r--  notification/irc.py | 48
-rw-r--r--  notification/mail.py | 12
-rw-r--r--  notification/nexmo.py | 1
-rw-r--r--  notification/sendgrid.py | 6
-rw-r--r--  notification/slack.py | 2
-rw-r--r--  notification/twilio.py | 6
-rw-r--r--  notification/typetalk.py | 16
-rw-r--r--  packaging/dpkg_selections.py (renamed from packaging/dpkg_selections) | 0
-rw-r--r--  packaging/elasticsearch_plugin.py | 168
-rw-r--r--  packaging/language/bundler.py | 211
-rw-r--r--  packaging/os/openbsd_pkg.py | 2
-rw-r--r--  packaging/os/portage.py | 16
-rw-r--r--  system/filesystem.py | 86
-rw-r--r--  system/firewalld.py | 53
-rw-r--r--  system/gluster_volume.py | 2
-rw-r--r--  system/zfs.py | 17
-rwxr-xr-x  test-docs.sh | 21
-rw-r--r--  windows/win_iis_virtualdirectory.ps1 | 128
-rw-r--r--  windows/win_iis_virtualdirectory.py | 57
-rw-r--r--  windows/win_iis_webapplication.ps1 | 132
-rw-r--r--  windows/win_iis_webapplication.py | 68
-rw-r--r--  windows/win_iis_webapppool.ps1 | 112
-rw-r--r--  windows/win_iis_webapppool.py | 112
-rw-r--r--  windows/win_iis_webbinding.ps1 | 138
-rw-r--r--  windows/win_iis_webbinding.py | 143
-rw-r--r--  windows/win_iis_website.ps1 | 179
-rw-r--r--  windows/win_iis_website.py | 133
-rw-r--r--  windows/win_regedit.ps1 | 190
-rw-r--r--  windows/win_regedit.py | 109
-rw-r--r--  windows/win_scheduled_task.ps1 | 74
-rw-r--r--  windows/win_scheduled_task.py | 51
-rw-r--r--  windows/win_unzip.ps1 | 157
-rw-r--r--  windows/win_unzip.py | 106
98 files changed, 7173 insertions, 350 deletions
diff --git a/.travis.yml b/.travis.yml
index 84ec3a09..057524c4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,3 +13,4 @@ script:
- python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/layman\.py|/maven_artifact\.py|clustering/consul.*\.py|notification/pushbullet\.py' .
- python2.6 -m compileall -fq .
- python2.7 -m compileall -fq .
+ #- ./test-docs.sh extras
diff --git a/VERSION b/VERSION
index 53adb84c..ee36851a 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.8.2
+${version}
diff --git a/cloud/amazon/GUIDELINES.md b/cloud/amazon/GUIDELINES.md
new file mode 100644
index 00000000..ee5aea90
--- /dev/null
+++ b/cloud/amazon/GUIDELINES.md
@@ -0,0 +1,88 @@
+Guidelines for AWS modules
+--------------------------
+
+Naming your module
+==================
+
+Base the name of the module on the part of AWS that
+you actually use. (A good rule of thumb is to take
+whatever module you use with boto as a starting point).
+
+Don't further abbreviate names - if something is a well
+known abbreviation due to it being a major component of
+AWS, that's fine, but don't create new ones independently
+(e.g. VPC, ELB, etc. are fine)
+
+Using boto
+==========
+
+Wrap the `import` statements in a try block and fail the
+module later on if the import fails
+
+```
+try:
+ import boto
+ import boto.module.that.you.use
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+<lots of code here>
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ module_specific_parameter=dict(),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+```
+
+
+Try and keep backward compatibility with relatively recent
+versions of boto. That means that if you want to implement some
+functionality that uses a new feature of boto, it should only
+fail if that feature actually needs to be run, with a message
+saying which version of boto is needed.
+
+Use feature testing (e.g. `hasattr(boto.module, 'shiny_new_method')`)
+to check whether boto supports a feature rather than version checking
+
+e.g. from the `ec2` module:
+```
+if boto_supports_profile_name_arg(ec2):
+ params['instance_profile_name'] = instance_profile_name
+else:
+ if instance_profile_name is not None:
+ module.fail_json(
+ msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
+```
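An editorial sketch of the feature-testing pattern the guideline describes (not part of this commit): the helper name and the probed attribute are illustrative, but the idea is to check the live connection object rather than parse `boto.__version__`.

```python
def boto_supports_shiny_new_method(ec2):
    # Probe the connection object for the capability instead of
    # comparing boto version strings.
    return hasattr(ec2, 'shiny_new_method')

# Usage mirrors the ec2 module pattern shown above:
# if boto_supports_shiny_new_method(ec2):
#     params['shiny_option'] = shiny_option
# elif shiny_option is not None:
#     module.fail_json(msg="shiny_option requires a newer version of boto")
```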
+
+
+Connecting to AWS
+=================
+
+For EC2 you can just use
+
+```
+ec2 = ec2_connect(module)
+```
+
+For other modules, you should use `get_aws_connection_info` and then
+`connect_to_aws`. To connect to an example `xyz` service:
+
+```
+region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+xyz = connect_to_aws(boto.xyz, region, **aws_connect_params)
+```
+
+The reason for using `get_aws_connection_info` and `connect_to_aws`
+rather than doing it yourself (even `ec2_connect` uses them under the
+hood) is that they handle some of the more esoteric connection
+options, such as security tokens and boto profiles.
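Putting the two guidelines together, a minimal module skeleton might look like the editorial sketch below (not part of this commit). `boto.sts` stands in for the guideline's example `xyz` service, and the trailing wildcard imports follow the pattern used by the modules in this changeset.

```python
try:
    import boto
    import boto.sts  # stand-in for the guideline's example `xyz` service
    from boto.exception import NoAuthHandlerFound
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def main():
    argument_spec = ec2_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        # Security tokens and boto profiles supplied to the task are already
        # folded into aws_connect_params by get_aws_connection_info().
        connection = connect_to_aws(boto.sts, region, **aws_connect_params)
    except (NoAuthHandlerFound, StandardError), e:
        module.fail_json(msg=str(e))

    module.exit_json(changed=False)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
```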
diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py
index 1c9313bb..557f2eba 100644
--- a/cloud/amazon/cloudtrail.py
+++ b/cloud/amazon/cloudtrail.py
@@ -21,7 +21,7 @@ short_description: manage CloudTrail creation and deletion
description:
- Creates or deletes CloudTrail configuration. Ensures logging is also enabled.
version_added: "2.0"
-author:
+author:
- "Ansible Core Team"
- "Ted Timmons"
requirements:
diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py
new file mode 100644
index 00000000..c97ff6f0
--- /dev/null
+++ b/cloud/amazon/dynamodb_table.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: dynamodb_table
+short_description: Create, update or delete AWS Dynamo DB tables.
+description:
+ - Create or delete AWS Dynamo DB tables.
+ - Can update the provisioned throughput on existing tables.
+ - Returns the status of the specified table.
+version_added: "2.0"
+author: Alan Loi (@loia)
+requirements:
+ - "boto >= 2.13.2"
+options:
+ state:
+ description:
+ - Create or delete the table
+ required: false
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Name of the table.
+ required: true
+ hash_key_name:
+ description:
+ - Name of the hash key.
+ - Required when C(state=present).
+ required: false
+ default: null
+ hash_key_type:
+ description:
+ - Type of the hash key.
+ required: false
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ default: 'STRING'
+ range_key_name:
+ description:
+ - Name of the range key.
+ required: false
+ default: null
+ range_key_type:
+ description:
+ - Type of the range key.
+ required: false
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ default: 'STRING'
+ read_capacity:
+ description:
+ - Read throughput capacity (units) to provision.
+ required: false
+ default: 1
+ write_capacity:
+ description:
+ - Write throughput capacity (units) to provision.
+ required: false
+ default: 1
+ region:
+ description:
+ - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+ required: false
+ aliases: ['aws_region', 'ec2_region']
+
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = '''
+# Create dynamo table with hash and range primary key
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ hash_key_name: id
+ hash_key_type: STRING
+ range_key_name: create_time
+ range_key_type: NUMBER
+ read_capacity: 2
+ write_capacity: 2
+
+# Update capacity on existing dynamo table
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ read_capacity: 10
+ write_capacity: 10
+
+# Delete dynamo table
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ state: absent
+'''
+
+RETURN = '''
+table_status:
+ description: The current status of the table.
+ returned: success
+ type: string
+ sample: ACTIVE
+'''
+
+try:
+ import boto
+ import boto.dynamodb2
+ from boto.dynamodb2.table import Table
+ from boto.dynamodb2.fields import HashKey, RangeKey
+ from boto.dynamodb2.types import STRING, NUMBER, BINARY
+ from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
+ HAS_BOTO = True
+
+except ImportError:
+ HAS_BOTO = False
+
+
+DYNAMO_TYPE_MAP = {
+ 'STRING': STRING,
+ 'NUMBER': NUMBER,
+ 'BINARY': BINARY
+}
+
+
+def create_or_update_dynamo_table(connection, module):
+ table_name = module.params.get('name')
+ hash_key_name = module.params.get('hash_key_name')
+ hash_key_type = module.params.get('hash_key_type')
+ range_key_name = module.params.get('range_key_name')
+ range_key_type = module.params.get('range_key_type')
+ read_capacity = module.params.get('read_capacity')
+ write_capacity = module.params.get('write_capacity')
+
+ schema = [
+ HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)),
+ RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type))
+ ]
+ throughput = {
+ 'read': read_capacity,
+ 'write': write_capacity
+ }
+
+ result = dict(
+ region=module.params.get('region'),
+ table_name=table_name,
+ hash_key_name=hash_key_name,
+ hash_key_type=hash_key_type,
+ range_key_name=range_key_name,
+ range_key_type=range_key_type,
+ read_capacity=read_capacity,
+ write_capacity=write_capacity,
+ )
+
+ try:
+ table = Table(table_name, connection=connection)
+
+ if dynamo_table_exists(table):
+ result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode)
+ else:
+ if not module.check_mode:
+ Table.create(table_name, connection=connection, schema=schema, throughput=throughput)
+ result['changed'] = True
+
+ if not module.check_mode:
+ result['table_status'] = table.describe()['Table']['TableStatus']
+
+ except BotoServerError:
+ result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+def delete_dynamo_table(connection, module):
+ table_name = module.params.get('name')
+
+ result = dict(
+ region=module.params.get('region'),
+ table_name=table_name,
+ )
+
+ try:
+ table = Table(table_name, connection=connection)
+
+ if dynamo_table_exists(table):
+ if not module.check_mode:
+ table.delete()
+ result['changed'] = True
+
+ else:
+ result['changed'] = False
+
+ except BotoServerError:
+ result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+def dynamo_table_exists(table):
+ try:
+ table.describe()
+ return True
+
+ except JSONResponseError, e:
+ if e.message and e.message.startswith('Requested resource not found'):
+ return False
+ else:
+ raise e
+
+
+def update_dynamo_table(table, throughput=None, check_mode=False):
+ table.describe() # populate table details
+
+ if has_throughput_changed(table, throughput):
+ if not check_mode:
+ return table.update(throughput=throughput)
+ else:
+ return True
+
+ return False
+
+
+def has_throughput_changed(table, new_throughput):
+ if not new_throughput:
+ return False
+
+ return new_throughput['read'] != table.throughput['read'] or \
+ new_throughput['write'] != table.throughput['write']
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ hash_key_name=dict(required=True, type='str'),
+ hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
+ range_key_name=dict(type='str'),
+ range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
+ read_capacity=dict(default=1, type='int'),
+ write_capacity=dict(default=1, type='int'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg='region must be specified')
+
+ try:
+ connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
+
+ except (NoAuthHandlerFound, StandardError), e:
+ module.fail_json(msg=str(e))
+
+ state = module.params.get('state')
+ if state == 'present':
+ create_or_update_dynamo_table(connection, module)
+ elif state == 'absent':
+ delete_dynamo_table(connection, module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_ami_copy.py b/cloud/amazon/ec2_ami_copy.py
new file mode 100644
index 00000000..ff9bde88
--- /dev/null
+++ b/cloud/amazon/ec2_ami_copy.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_copy
+short_description: copies an AMI between AWS regions and returns the new image id
+description:
+ - Copies an AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5.
+version_added: "2.0"
+options:
+ source_region:
+ description:
+ - the source region that the AMI should be copied from
+ required: true
+ region:
+ description:
+ - the destination region that the AMI should be copied to
+ required: true
+ aliases: ['aws_region', 'ec2_region', 'dest_region']
+ source_image_id:
+ description:
+ - the id of the image in the source region that should be copied
+ required: true
+ name:
+ description:
+ - The name of the new image to copy
+ required: false
+ default: null
+ description:
+ description:
+ - An optional human-readable string describing the contents and purpose of the new AMI.
+ required: false
+ default: null
+ wait:
+ description:
+ - wait for the copied AMI to be in state 'available' before returning.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ required: false
+ default: 1200
+ tags:
+ description:
+ - a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
+ required: false
+ default: null
+
+author: Amir Moulavi <amir.moulavi@gmail.com>
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Basic AMI Copy
+- local_action:
+ module: ec2_ami_copy
+ source_region: eu-west-1
+ dest_region: us-east-1
+ source_image_id: ami-xxxxxxx
+ name: SuperService-new-AMI
+ description: latest patch
+ tags: '{"Name":"SuperService-new-AMI", "type":"SuperService"}'
+ wait: yes
+ register: image_id
+'''
+
+
+import sys
+import time
+
+try:
+ import boto
+ import boto.ec2
+ from boto.vpc import VPCConnection
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+def copy_image(module, ec2):
+ """
+ Copies an AMI
+
+ module : AnsibleModule object
+ ec2: authenticated ec2 connection object
+ """
+
+ source_region = module.params.get('source_region')
+ source_image_id = module.params.get('source_image_id')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ tags = module.params.get('tags')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ wait = module.params.get('wait')
+
+ try:
+ params = {'source_region': source_region,
+ 'source_image_id': source_image_id,
+ 'name': name,
+ 'description': description
+ }
+
+ image_id = ec2.copy_image(**params).image_id
+ except boto.exception.BotoServerError, e:
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
+
+ img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait)
+
+ img = wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait)
+
+ register_tags_if_any(module, ec2, tags, image_id)
+
+ module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True)
+
+
+# register tags to the copied AMI in dest_region
+def register_tags_if_any(module, ec2, tags, image_id):
+ if tags:
+ try:
+ ec2.create_tags([image_id], tags)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+# wait here until the image is copied (i.e. the state becomes 'available')
+def wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait):
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time() and (img is None or img.state != 'available'):
+ img = ec2.get_image(image_id)
+ time.sleep(3)
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="timed out waiting for image to be copied")
+ return img
+
+
+# wait until the image is recognized.
+def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait):
+ for i in range(wait_timeout):
+ try:
+ return ec2.get_image(image_id)
+ except boto.exception.EC2ResponseError, e:
+ # This exception we expect initially right after registering the copy with EC2 API
+ if 'InvalidAMIID.NotFound' in e.error_code and wait:
+ time.sleep(1)
+ else:
+ # On any other exception we should fail
+ module.fail_json(
+ msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help: " + str(
+ e))
+ else:
+ module.fail_json(msg="timed out waiting for image to be recognized")
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ source_region=dict(required=True),
+ source_image_id=dict(required=True),
+ name=dict(),
+ description=dict(default=""),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(default=1200),
+ tags=dict(type='dict')))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ try:
+ ec2 = ec2_connect(module)
+ except boto.exception.NoAuthHandlerFound, e:
+ module.fail_json(msg=str(e))
+
+ try:
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+ vpc = connect_to_aws(boto.vpc, region, **boto_params)
+ except boto.exception.NoAuthHandlerFound, e:
+ module.fail_json(msg = str(e))
+
+ if not region:
+ module.fail_json(msg="region must be specified")
+
+ copy_image(module, ec2)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+main()
+
diff --git a/cloud/amazon/ec2_eni.py b/cloud/amazon/ec2_eni.py
new file mode 100644
index 00000000..9e878e7d
--- /dev/null
+++ b/cloud/amazon/ec2_eni.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_eni
+short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
+description:
+ - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance.
+version_added: "2.0"
+author: Rob White, wimnat [at] gmail.com, @wimnat
+options:
+ eni_id:
+ description:
+ - The ID of the ENI
+ required: false
+ default: null
+ instance_id:
+ description:
+ - Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'.
+ required: false
+ default: null
+ private_ip_address:
+ description:
+ - Private IP address.
+ required: false
+ default: null
+ subnet_id:
+ description:
+ - ID of subnet in which to create the ENI. Only required when state=present.
+ required: true
+ description:
+ description:
+ - Optional description of the ENI.
+ required: false
+ default: null
+ security_groups:
+ description:
+ - List of security groups associated with the interface. Only used when state=present.
+ required: false
+ default: null
+ state:
+ description:
+ - Create or delete ENI.
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ device_index:
+ description:
+ - The index of the device for the network interface attachment on the instance.
+ required: false
+ default: 0
+ force_detach:
+ description:
+ - Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
+ required: false
+ default: no
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+ source_dest_check:
+ description:
+ - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ENI. As no security group is defined, ENI will be created in default security group
+- ec2_eni:
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI and attach it to an instance
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Destroy an ENI, detaching it from any instance if necessary
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ force_detach: yes
+ state: absent
+
+# Update an ENI
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ description: "My new description"
+ state: present
+
+# Detach an ENI from an instance
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ instance_id: None
+ state: present
+
+### Delete an interface on termination
+# First create the interface
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ register: eni
+
+# Modify the interface to enable the delete_on_termination flag
+- ec2_eni:
+ eni_id: "{{ eni.interface.id }}"
+ delete_on_termination: true
+
+'''
+
+import time
+import xml.etree.ElementTree as ET
+import re
+
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def get_error_message(xml_string):
+
+ root = ET.fromstring(xml_string)
+ for message in root.findall('.//Message'):
+ return message.text
+
+
+def get_eni_info(interface):
+
+ interface_info = {'id': interface.id,
+ 'subnet_id': interface.subnet_id,
+ 'vpc_id': interface.vpc_id,
+ 'description': interface.description,
+ 'owner_id': interface.owner_id,
+ 'status': interface.status,
+ 'mac_address': interface.mac_address,
+ 'private_ip_address': interface.private_ip_address,
+ 'source_dest_check': interface.source_dest_check,
+ 'groups': dict((group.id, group.name) for group in interface.groups),
+ }
+
+ if interface.attachment is not None:
+ interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+ 'instance_id': interface.attachment.instance_id,
+ 'device_index': interface.attachment.device_index,
+ 'status': interface.attachment.status,
+ 'attach_time': interface.attachment.attach_time,
+ 'delete_on_termination': interface.attachment.delete_on_termination,
+ }
+
+ return interface_info
+
+def wait_for_eni(eni, status):
+
+ while True:
+ time.sleep(3)
+ eni.update()
+ # If the status is detached we just need attachment to disappear
+ if eni.attachment is None:
+ if status == "detached":
+ break
+ else:
+ if status == "attached" and eni.attachment.status == "attached":
+ break
+
+
+def create_eni(connection, module):
+
+ instance_id = module.params.get("instance_id")
+ if instance_id == 'None':
+ instance_id = None
+ do_detach = True
+ else:
+ do_detach = False
+ device_index = module.params.get("device_index")
+ subnet_id = module.params.get('subnet_id')
+ private_ip_address = module.params.get('private_ip_address')
+ description = module.params.get('description')
+ security_groups = module.params.get('security_groups')
+ changed = False
+
+ try:
+ eni = compare_eni(connection, module)
+ if eni is None:
+ eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
+ if instance_id is not None:
+ try:
+ eni.attach(instance_id, device_index)
+ except BotoServerError as ex:
+ eni.delete()
+ raise
+ changed = True
+ # Wait to allow creation / attachment to finish
+ wait_for_eni(eni, "attached")
+ eni.update()
+
+ except BotoServerError as e:
+ module.fail_json(msg=get_error_message(e.args[2]))
+
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
+def modify_eni(connection, module):
+
+ eni_id = module.params.get("eni_id")
+ instance_id = module.params.get("instance_id")
+ if instance_id == 'None':
+ instance_id = None
+ do_detach = True
+ else:
+ do_detach = False
+ device_index = module.params.get("device_index")
+ subnet_id = module.params.get('subnet_id')
+ private_ip_address = module.params.get('private_ip_address')
+ description = module.params.get('description')
+ security_groups = module.params.get('security_groups')
+ force_detach = module.params.get("force_detach")
+ source_dest_check = module.params.get("source_dest_check")
+ delete_on_termination = module.params.get("delete_on_termination")
+ changed = False
+
+
+ try:
+ # Get the eni with the eni_id specified
+ eni_result_set = connection.get_all_network_interfaces(eni_id)
+ eni = eni_result_set[0]
+ if description is not None:
+ if eni.description != description:
+ connection.modify_network_interface_attribute(eni.id, "description", description)
+ changed = True
+ if security_groups is not None:
+ if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups):
+ connection.modify_network_interface_attribute(eni.id, "groupSet", security_groups)
+ changed = True
+ if source_dest_check is not None:
+ if eni.source_dest_check != source_dest_check:
+ connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
+ changed = True
+ if delete_on_termination is not None:
+ if eni.attachment is not None:
+ if eni.attachment.delete_on_termination is not delete_on_termination:
+ connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
+ changed = True
+ else:
+ module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached")
+ if eni.attachment is not None and instance_id is None and do_detach is True:
+ eni.detach(force_detach)
+ wait_for_eni(eni, "detached")
+ changed = True
+ else:
+ if instance_id is not None:
+ eni.attach(instance_id, device_index)
+ wait_for_eni(eni, "attached")
+ changed = True
+
+ except BotoServerError as e:
+ print e
+ module.fail_json(msg=get_error_message(e.args[2]))
+
+ eni.update()
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
+def delete_eni(connection, module):
+
+ eni_id = module.params.get("eni_id")
+ force_detach = module.params.get("force_detach")
+
+ try:
+ eni_result_set = connection.get_all_network_interfaces(eni_id)
+ eni = eni_result_set[0]
+
+ if force_detach is True:
+ if eni.attachment is not None:
+ eni.detach(force_detach)
+ # Wait to allow detachment to finish
+ wait_for_eni(eni, "detached")
+ eni.update()
+ eni.delete()
+ changed = True
+ else:
+ eni.delete()
+ changed = True
+
+ module.exit_json(changed=changed)
+ except BotoServerError as e:
+ msg = get_error_message(e.args[2])
+ regex = re.compile('The networkInterface ID \'.*\' does not exist')
+ if regex.search(msg) is not None:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg=get_error_message(e.args[2]))
+
+def compare_eni(connection, module):
+
+ eni_id = module.params.get("eni_id")
+ subnet_id = module.params.get('subnet_id')
+ private_ip_address = module.params.get('private_ip_address')
+ description = module.params.get('description')
+ security_groups = module.params.get('security_groups')
+
+ try:
+ all_eni = connection.get_all_network_interfaces(eni_id)
+
+ for eni in all_eni:
+ remote_security_groups = get_sec_group_list(eni.groups)
+ if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups):
+ return eni
+
+ except BotoServerError as e:
+ module.fail_json(msg=get_error_message(e.args[2]))
+
+ return None
+
+def get_sec_group_list(groups):
+
+ # Build list of remote security groups
+ remote_security_groups = []
+ for group in groups:
+ remote_security_groups.append(group.id.encode())
+
+ return remote_security_groups
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ eni_id = dict(default=None),
+ instance_id = dict(default=None),
+ private_ip_address = dict(),
+ subnet_id = dict(),
+ description = dict(),
+ security_groups = dict(type='list'),
+ device_index = dict(default=0, type='int'),
+ state = dict(default='present', choices=['present', 'absent']),
+ force_detach = dict(default='no', type='bool'),
+ source_dest_check = dict(default=None, type='bool'),
+ delete_on_termination = dict(default=None, type='bool')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ state = module.params.get("state")
+ eni_id = module.params.get("eni_id")
+
+ if state == 'present':
+ if eni_id is None:
+ if module.params.get("subnet_id") is None:
+ module.fail_json(msg="subnet_id must be specified when state=present")
+ create_eni(connection, module)
+ else:
+ modify_eni(connection, module)
+ elif state == 'absent':
+ if eni_id is None:
+ module.fail_json(msg="eni_id must be specified")
+ else:
+ delete_eni(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+# this is magic, see lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+
+main()
diff --git a/cloud/amazon/ec2_eni_facts.py b/cloud/amazon/ec2_eni_facts.py
index 76347c84..981358c3 100644
--- a/cloud/amazon/ec2_eni_facts.py
+++ b/cloud/amazon/ec2_eni_facts.py
@@ -25,8 +25,8 @@ options:
eni_id:
description:
- The ID of the ENI. Pass this option to gather facts about a particular ENI, otherwise, all ENIs are returned.
- required = false
- default = null
+ required: false
+ default: null
extends_documentation_fragment: aws
'''
diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py
new file mode 100644
index 00000000..63be4824
--- /dev/null
+++ b/cloud/amazon/ec2_vpc_igw.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_igw
+short_description: Manage an AWS VPC Internet gateway
+description:
+ - Manage an AWS VPC Internet gateway
+version_added: "2.0"
+author: Robert Estelle, @erydo
+options:
+ vpc_id:
+ description:
+ - The VPC ID for the VPC in which to manage the Internet Gateway.
+ required: true
+ default: null
+ state:
+ description:
+ - Create or terminate the IGW
+ required: false
+ default: present
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
+# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use
+# in setting up NATs etc.
+ local_action:
+ module: ec2_vpc_igw
+ vpc_id: {{vpc.vpc_id}}
+ region: {{vpc.vpc.region}}
+ state: present
+ register: igw
+'''
+
+
+import sys # noqa
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import EC2ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+ if __name__ != '__main__':
+ raise
+
+
+class AnsibleIGWException(Exception):
+ pass
+
+
+def ensure_igw_absent(vpc_conn, vpc_id, check_mode):
+ igws = vpc_conn.get_all_internet_gateways(
+ filters={'attachment.vpc-id': vpc_id})
+
+ if not igws:
+ return {'changed': False}
+
+ if check_mode:
+ return {'changed': True}
+
+ for igw in igws:
+ try:
+ vpc_conn.detach_internet_gateway(igw.id, vpc_id)
+ vpc_conn.delete_internet_gateway(igw.id)
+ except EC2ResponseError as e:
+ raise AnsibleIGWException(
+ 'Unable to delete Internet Gateway, error: {0}'.format(e))
+
+ return {'changed': True}
+
+
+def ensure_igw_present(vpc_conn, vpc_id, check_mode):
+ igws = vpc_conn.get_all_internet_gateways(
+ filters={'attachment.vpc-id': vpc_id})
+
+ if len(igws) > 1:
+ raise AnsibleIGWException(
+ 'EC2 returned more than one Internet Gateway for VPC {0}, aborting'
+ .format(vpc_id))
+
+ if igws:
+ return {'changed': False, 'gateway_id': igws[0].id}
+ else:
+ if check_mode:
+ return {'changed': True, 'gateway_id': None}
+
+ try:
+ igw = vpc_conn.create_internet_gateway()
+ vpc_conn.attach_internet_gateway(igw.id, vpc_id)
+ return {'changed': True, 'gateway_id': igw.id}
+ except EC2ResponseError as e:
+ raise AnsibleIGWException(
+ 'Unable to create Internet Gateway, error: {0}'.format(e))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ vpc_id = dict(required=True),
+ state = dict(choices=['present', 'absent'], default='present')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ vpc_id = module.params.get('vpc_id')
+ state = module.params.get('state', 'present')
+
+ try:
+ if state == 'present':
+ result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode)
+ except AnsibleIGWException as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import * # noqa
+from ansible.module_utils.ec2 import * # noqa
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py
index b9cb0294..6086c42f 100644
--- a/cloud/amazon/ec2_win_password.py
+++ b/cloud/amazon/ec2_win_password.py
@@ -15,14 +15,33 @@ options:
required: true
key_file:
description:
- - path to the file containing the key pair used on the instance
+ - Path to the file containing the key pair used on the instance.
required: true
+ key_passphrase:
+ version_added: "2.0"
+ description:
+ - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password-protected keys if they do not use DES or 3DES, e.g. openssl rsa -in current_key -out new_key -des3.
+ required: false
+ default: null
region:
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
+ wait:
+ version_added: "2.0"
+ description:
+ - Whether or not to wait for the password to be available before returning.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ version_added: "2.0"
+ description:
+ - Number of seconds to wait before giving up.
+ required: false
+ default: 120
extends_documentation_fragment: aws
'''
@@ -36,12 +55,34 @@ tasks:
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
+
+# Example of getting a password with a password protected key
+tasks:
+- name: get the Administrator password
+ ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_protected_test_key.pem"
+ key_passphrase: "secret"
+
+# Example of waiting for a password
+tasks:
+- name: get the Administrator password
+ ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_test_key.pem"
+ wait: yes
+ wait_timeout: 45
'''
from base64 import b64decode
from os.path import expanduser
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
+import datetime
try:
import boto.ec2
@@ -54,6 +95,9 @@ def main():
argument_spec.update(dict(
instance_id = dict(required=True),
key_file = dict(required=True),
+ key_passphrase = dict(no_log=True, default=None, required=False),
+ wait = dict(type='bool', default=False, required=False),
+ wait_timeout = dict(default=120, required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@@ -63,26 +107,48 @@ def main():
instance_id = module.params.get('instance_id')
key_file = expanduser(module.params.get('key_file'))
+ key_passphrase = module.params.get('key_passphrase')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
ec2 = ec2_connect(module)
- data = ec2.get_password_data(instance_id)
- decoded = b64decode(data)
+ if wait:
+ start = datetime.datetime.now()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+
+ while datetime.datetime.now() < end:
+ data = ec2.get_password_data(instance_id)
+ decoded = b64decode(data)
+ if wait and not decoded:
+ time.sleep(5)
+ else:
+ break
+ else:
+ data = ec2.get_password_data(instance_id)
+ decoded = b64decode(data)
+
+ if wait and datetime.datetime.now() >= end:
+ module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout)
f = open(key_file, 'r')
- key = RSA.importKey(f.read())
+ key = RSA.importKey(f.read(), key_passphrase)
cipher = PKCS1_v1_5.new(key)
sentinel = 'password decryption failed!!!'
try:
- decrypted = cipher.decrypt(decoded, sentinel)
+ decrypted = cipher.decrypt(decoded, sentinel)
except ValueError as e:
- decrypted = None
+ decrypted = None
if decrypted == None:
module.exit_json(win_password='', changed=False)
else:
- module.exit_json(win_password=decrypted, changed=True)
+ if wait:
+ elapsed = datetime.datetime.now() - start
+ module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
+ else:
+ module.exit_json(win_password=decrypted, changed=True)
# import module snippets
from ansible.module_utils.basic import *
diff --git a/cloud/amazon/route53_zone.py b/cloud/amazon/route53_zone.py
new file mode 100644
index 00000000..4630e00d
--- /dev/null
+++ b/cloud/amazon/route53_zone.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+module: route53_zone
+short_description: add or delete Route53 zones
+description:
+ - Creates and deletes Route53 private and public zones
+version_added: "2.0"
+options:
+ zone:
+ description:
+ - "The DNS zone record (eg: foo.com.)"
+ required: true
+ state:
+ description:
+ - Whether or not the zone should exist
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ vpc_id:
+ description:
+ - The VPC ID the zone should be a part of (if this is going to be a private zone)
+ required: false
+ default: null
+ vpc_region:
+ description:
+ - The VPC Region the zone should be a part of (if this is going to be a private zone)
+ required: false
+ default: null
+ comment:
+ description:
+ - Comment associated with the zone
+ required: false
+ default: ''
+extends_documentation_fragment: aws
+author: "Christopher Troup (@minichate)"
+'''
+
+import time
+
+try:
+ import boto
+ import boto.ec2
+ from boto import route53
+ from boto.route53 import Route53Connection
+ from boto.route53.zone import Zone
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ zone=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ vpc_id=dict(default=None),
+ vpc_region=dict(default=None),
+ comment=dict(default=''),
+ )
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ zone_in = module.params.get('zone').lower()
+ state = module.params.get('state').lower()
+ vpc_id = module.params.get('vpc_id')
+ vpc_region = module.params.get('vpc_region')
+ comment = module.params.get('comment')
+
+ private_zone = vpc_id is not None and vpc_region is not None
+
+ _, _, aws_connect_kwargs = get_aws_connection_info(module)
+
+ # connect to the route53 endpoint
+ try:
+ conn = Route53Connection(**aws_connect_kwargs)
+ except boto.exception.BotoServerError, e:
+ module.fail_json(msg=e.error_message)
+
+ results = conn.get_all_hosted_zones()
+ zones = {}
+
+ for r53zone in results['ListHostedZonesResponse']['HostedZones']:
+ zone_id = r53zone['Id'].replace('/hostedzone/', '')
+ zone_details = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse']
+ if vpc_id and 'VPCs' in zone_details:
+ # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
+ if isinstance(zone_details['VPCs'], dict):
+ if zone_details['VPCs']['VPC']['VPCId'] == vpc_id:
+ zones[r53zone['Name']] = zone_id
+ else: # Forward compatibility for when boto fixes that bug
+ if vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
+ zones[r53zone['Name']] = zone_id
+ else:
+ zones[r53zone['Name']] = zone_id
+
+ record = {
+ 'private_zone': private_zone,
+ 'vpc_id': vpc_id,
+ 'vpc_region': vpc_region,
+ 'comment': comment,
+ }
+
+ if state == 'present' and zone_in in zones:
+ if private_zone:
+ details = conn.get_hosted_zone(zones[zone_in])
+
+ if 'VPCs' not in details['GetHostedZoneResponse']:
+ module.fail_json(
+ msg="Can't change VPC from public to private"
+ )
+
+ vpc_details = details['GetHostedZoneResponse']['VPCs']['VPC']
+ current_vpc_id = vpc_details['VPCId']
+ current_vpc_region = vpc_details['VPCRegion']
+
+ if current_vpc_id != vpc_id:
+ module.fail_json(
+ msg="Can't change VPC ID once a zone has been created"
+ )
+ if current_vpc_region != vpc_region:
+ module.fail_json(
+ msg="Can't change VPC Region once a zone has been created"
+ )
+
+ record['zone_id'] = zones[zone_in]
+ record['name'] = zone_in
+ module.exit_json(changed=False, set=record)
+
+ elif state == 'present':
+ result = conn.create_hosted_zone(zone_in, **record)
+ hosted_zone = result['CreateHostedZoneResponse']['HostedZone']
+ zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
+ record['zone_id'] = zone_id
+ record['name'] = zone_in
+ module.exit_json(changed=True, set=record)
+
+ elif state == 'absent' and zone_in in zones:
+ conn.delete_hosted_zone(zones[zone_in])
+ module.exit_json(changed=True)
+
+ elif state == 'absent':
+ module.exit_json(changed=False)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+main()
diff --git a/cloud/amazon/s3_logging.py b/cloud/amazon/s3_logging.py
new file mode 100644
index 00000000..75b3fe73
--- /dev/null
+++ b/cloud/amazon/s3_logging.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: s3_logging
+short_description: Manage logging facility of an s3 bucket in AWS
+description:
+ - Manage logging facility of an s3 bucket in AWS
+version_added: "2.0"
+author: Rob White (@wimnat)
+options:
+ name:
+ description:
+ - "Name of the s3 bucket."
+ required: true
+ region:
+ description:
+ - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard."
+ required: false
+ default: null
+ state:
+ description:
+ - "Enable or disable logging."
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ target_bucket:
+ description:
+ - "The bucket to log to. Required when state=present."
+ required: false
+ default: null
+ target_prefix:
+ description:
+ - "The prefix that should be prepended to the generated log files written to the target_bucket."
+ required: false
+ default: ""
+
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
+ s3_logging:
+ name: mywebsite.com
+ target_bucket: mylogs
+ target_prefix: logs/mywebsite.com
+ state: present
+
+- name: Remove logging on an s3 bucket
+ s3_logging:
+ name: mywebsite.com
+ state: absent
+
+'''
+
+try:
+ import boto.ec2
+ from boto.s3.connection import OrdinaryCallingFormat, Location
+ from boto.exception import BotoServerError, S3CreateError, S3ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def compare_bucket_logging(bucket, target_bucket, target_prefix):
+
+ bucket_log_obj = bucket.get_logging_status()
+ if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix:
+ return False
+ else:
+ return True
+
+
+def enable_bucket_logging(connection, module):
+
+ bucket_name = module.params.get("name")
+ target_bucket = module.params.get("target_bucket")
+ target_prefix = module.params.get("target_prefix")
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(bucket_name)
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ try:
+ if not compare_bucket_logging(bucket, target_bucket, target_prefix):
+ # Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket
+ try:
+ target_bucket_obj = connection.get_bucket(target_bucket)
+ except S3ResponseError as e:
+ if e.status == 301:
+ module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
+ else:
+ module.fail_json(msg=e.message)
+ target_bucket_obj.set_as_logging_target()
+
+ bucket.enable_logging(target_bucket, target_prefix)
+ changed = True
+
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed)
+
+
+def disable_bucket_logging(connection, module):
+
+ bucket_name = module.params.get("name")
+ changed = False
+
+ try:
+ bucket = connection.get_bucket(bucket_name)
+ if not compare_bucket_logging(bucket, None, None):
+ bucket.disable_logging()
+ changed = True
+ except S3ResponseError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed)
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name = dict(required=True),
+ target_bucket = dict(required=False, default=None),
+ target_prefix = dict(required=False, default=""),
+ state = dict(required=False, default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region in ('us-east-1', '', None):
+ # S3ism for the US Standard region
+ location = Location.DEFAULT
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+ try:
+ connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
+ # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
+ if connection is None:
+ connection = boto.connect_s3(**aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ module.fail_json(msg=str(e))
+
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ enable_bucket_logging(connection, module)
+ elif state == 'absent':
+ disable_bucket_logging(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/cloud/amazon/sts_assume_role.py b/cloud/amazon/sts_assume_role.py
new file mode 100644
index 00000000..7eec28b8
--- /dev/null
+++ b/cloud/amazon/sts_assume_role.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: sts_assume_role
+short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
+description:
+ - Assume a role using AWS Security Token Service and obtain temporary credentials
+version_added: "2.0"
+author: Boris Ekelchik (@bekelchik)
+options:
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs)
+ required: true
+ role_session_name:
+ description:
+ - Name of the role's session - will be used by CloudTrail
+ required: true
+ policy:
+ description:
+ - Supplemental policy to use in addition to assumed role's policies.
+ required: false
+ default: null
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.
+ required: false
+ default: null
+ external_id:
+ description:
+ - A unique identifier that is used by third parties to assume a role in their customers' accounts.
+ required: false
+ default: null
+ mfa_serial_number:
+ description:
+ - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
+ required: false
+ default: null
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
+ required: false
+ default: null
+notes:
+ - In order to use the assumed role in a following playbook task you must pass the access_key, secret_key and session_token
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
+sts_assume_role:
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+register: assumed_role
+
+# Use the assumed role above to tag an instance in account 123456789012
+ec2_tag:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
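+# Illustrative only: assume a role whose trust policy requires MFA. The role ARN,
+# MFA serial number and token values below are placeholders.
+sts_assume_role:
+ role_arn: "arn:aws:iam::123456789012:role/someMfaProtectedRole"
+ role_session_name: "someRoleSession"
+ mfa_serial_number: "arn:aws:iam::123456789012:mfa/example.user"
+ mfa_token: "123456"
+register: assumed_role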
+'''
+
+import sys
+import time
+
+try:
+ import boto.sts
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def assume_role_policy(connection, module):
+
+ role_arn = module.params.get('role_arn')
+ role_session_name = module.params.get('role_session_name')
+ policy = module.params.get('policy')
+ duration_seconds = module.params.get('duration_seconds')
+ external_id = module.params.get('external_id')
+ mfa_serial_number = module.params.get('mfa_serial_number')
+ mfa_token = module.params.get('mfa_token')
+ changed = False
+
+ try:
+ assumed_role = connection.assume_role(role_arn, role_session_name, policy, duration_seconds, external_id, mfa_serial_number, mfa_token)
+ changed = True
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__)
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ role_arn = dict(required=True, default=None),
+ role_session_name = dict(required=True, default=None),
+ duration_seconds = dict(required=False, default=None, type='int'),
+ external_id = dict(required=False, default=None),
+ policy = dict(required=False, default=None),
+ mfa_serial_number = dict(required=False, default=None),
+ mfa_token = dict(required=False, default=None)
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.sts, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ try:
+ assume_role_policy(connection, module)
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+main()
diff --git a/cloud/centurylink/__init__.py b/cloud/centurylink/__init__.py
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/cloud/centurylink/__init__.py
@@ -0,0 +1 @@
+
diff --git a/cloud/centurylink/clc_publicip.py b/cloud/centurylink/clc_publicip.py
new file mode 100644
index 00000000..77632c1c
--- /dev/null
+++ b/cloud/centurylink/clc_publicip.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+DOCUMENTATION = '''
+module: clc_publicip
+short_description: Add and Delete public ips on servers in CenturyLink Cloud.
+description:
+ - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
+version_added: 1.0
+options:
+ protocol:
+ description:
+ - The protocol that the public IP will listen for.
+ default: TCP
+ choices: ['TCP', 'UDP', 'ICMP']
+ required: False
+ ports:
+ description:
+ - A list of ports to expose.
+ required: True
+ server_ids:
+ description:
+ - A list of servers to create public ips on.
+ required: True
+ state:
+ description:
+ - Determine whether to create or delete public IPs. If present, the module will not create a second public ip if one
+ already exists.
+ default: present
+ choices: ['present', 'absent']
+ required: False
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ choices: [ True, False ]
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+notes:
+ - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME: the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD: the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN: the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS: the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Add Public IP to Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ clc_publicip:
+ protocol: 'TCP'
+ ports:
+ - 80
+ server_ids:
+ - UC1ACCTSRVR01
+ - UC1ACCTSRVR02
+ state: present
+ register: clc
+
+ - name: debug
+ debug: var=clc
+
+- name: Delete Public IP from Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete Public IP From Servers
+ clc_publicip:
+ server_ids:
+ - UC1ACCTSRVR01
+ - UC1ACCTSRVR02
+ state: absent
+ register: clc
+
+ - name: debug
+ debug: var=clc
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcPublicIp(object):
+ clc = clc_sdk
+ module = None
+ group_dict = {}
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+ params = self.module.params
+ server_ids = params['server_ids']
+ ports = params['ports']
+ protocol = params['protocol']
+ state = params['state']
+ requests = []
+ changed_server_ids = []
+ changed = False
+
+ if state == 'present':
+ changed, changed_server_ids, requests = self.ensure_public_ip_present(
+ server_ids=server_ids, protocol=protocol, ports=ports)
+ elif state == 'absent':
+ changed, changed_server_ids, requests = self.ensure_public_ip_absent(
+ server_ids=server_ids)
+ else:
+ return self.module.fail_json(msg="Unknown State: " + state)
+ self._wait_for_requests_to_complete(requests)
+ return self.module.exit_json(changed=changed,
+ server_ids=changed_server_ids)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
+ ports=dict(type='list', required=True),
+ wait=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ def ensure_public_ip_present(self, server_ids, protocol, ports):
+ """
+ Ensures the given server ids have a public ip available
+ :param server_ids: the list of server ids
+ :param protocol: the ip protocol
+ :param ports: the list of ports to expose
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
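+ # Only servers that do not already have a public ip are changed, which
+ # keeps the module idempotent when state=present.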
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) == 0]
+ ports_to_expose = [{'protocol': protocol, 'port': port}
+ for port in ports]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._add_publicip_to_server(server, ports_to_expose)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _add_publicip_to_server(self, server, ports_to_expose):
+ result = None
+ try:
+ result = server.PublicIPs().Add(ports_to_expose)
+ except CLCException, ex:
+ self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_public_ip_absent(self, server_ids):
+ """
+ Ensures any public ips on the given server ids are removed
+ :param server_ids: the list of server ids
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) > 0]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._remove_publicip_from_server(server)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _remove_publicip_from_server(self, server):
+ try:
+ for ip_address in server.PublicIPs().public_ips:
+ result = ip_address.Delete()
+ except CLCException, ex:
+ self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process public ip request')
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
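+ # Order of precedence, as implemented below: an API token plus account
+ # alias is used first, then username/password; CLC_V2_API_URL, if set,
+ # only overrides the API endpoint.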
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_ids, message):
+ """
+ Gets list of servers from CLC api
+ """
+ try:
+ return self.clc.v2.Servers(server_ids).servers
+ except CLCException as exception:
+ self.module.fail_json(msg=message + ': %s' % exception)
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcPublicIp._define_module_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_public_ip = ClcPublicIp(module)
+ clc_public_ip.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py
index cc487af5..d1302854 100644
--- a/cloud/cloudstack/cs_account.py
+++ b/cloud/cloudstack/cs_account.py
@@ -400,11 +400,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py
index 580cc5d7..cfd76816 100644
--- a/cloud/cloudstack/cs_affinitygroup.py
+++ b/cloud/cloudstack/cs_affinitygroup.py
@@ -246,11 +246,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_facts.py b/cloud/cloudstack/cs_facts.py
new file mode 100644
index 00000000..e2bebf8b
--- /dev/null
+++ b/cloud/cloudstack/cs_facts.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_facts
+short_description: Gather facts on instances of Apache CloudStack based clouds.
+description:
+ - This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ filter:
+ description:
+ - Filter for a specific fact.
+ required: false
+ default: null
+ choices:
+ - cloudstack_service_offering
+ - cloudstack_availability_zone
+ - cloudstack_public_hostname
+ - cloudstack_public_ipv4
+ - cloudstack_local_hostname
+ - cloudstack_local_ipv4
+ - cloudstack_instance_id
+ - cloudstack_user_data
+requirements: [ 'yaml' ]
+'''
+
+EXAMPLES = '''
+# Gather all facts on instances
+- name: Gather cloudstack facts
+ cs_facts:
+
+# Gather specific fact on instances
+- name: Gather cloudstack facts
+ cs_facts: filter=cloudstack_instance_id
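+
+# Illustrative only: facts gathered by this module become host variables
+# and can be used directly in a later task.
+- name: Gather cloudstack facts
+ cs_facts: filter=cloudstack_public_ipv4
+
+- name: Show the public IP
+ debug: msg="public IPv4 is {{ cloudstack_public_ipv4 }}"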
+'''
+
+RETURN = '''
+---
+cloudstack_availability_zone:
+ description: zone the instance is deployed in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+cloudstack_instance_id:
+ description: UUID of the instance.
+ returned: success
+ type: string
+ sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
+cloudstack_local_hostname:
+ description: local hostname of the instance.
+ returned: success
+ type: string
+ sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
+cloudstack_local_ipv4:
+ description: local IPv4 of the instance.
+ returned: success
+ type: string
+ sample: 185.19.28.35
+cloudstack_public_hostname:
+ description: public hostname of the instance.
+ returned: success
+ type: string
+ sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
+cloudstack_public_ipv4:
+ description: public IPv4 of the instance.
+ returned: success
+ type: string
+ sample: 185.19.28.35
+cloudstack_service_offering:
+ description: service offering of the instance.
+ returned: success
+ type: string
+ sample: Micro 512mb 1cpu
+cloudstack_user_data:
+ description: data of the instance provided by users.
+ returned: success
+ type: dict
+ sample: { "bla": "foo" }
+'''
+
+import os
+
+try:
+ import yaml
+ has_lib_yaml = True
+except ImportError:
+ has_lib_yaml = False
+
+CS_METADATA_BASE_URL = "http://%s/latest/meta-data"
+CS_USERDATA_BASE_URL = "http://%s/latest/user-data"
+
+class CloudStackFacts(object):
+
+ def __init__(self):
+ self.facts = ansible_facts(module)
+ self.api_ip = None
+ self.fact_paths = {
+ 'cloudstack_service_offering': 'service-offering',
+ 'cloudstack_availability_zone': 'availability-zone',
+ 'cloudstack_public_hostname': 'public-hostname',
+ 'cloudstack_public_ipv4': 'public-ipv4',
+ 'cloudstack_local_hostname': 'local-hostname',
+ 'cloudstack_local_ipv4': 'local-ipv4',
+ 'cloudstack_instance_id': 'instance-id'
+ }
+
+ def run(self):
+ result = {}
+ filter = module.params.get('filter')
+ if not filter:
+ for key,path in self.fact_paths.iteritems():
+ result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path)
+ result['cloudstack_user_data'] = self._get_user_data_json()
+ else:
+ if filter == 'cloudstack_user_data':
+ result['cloudstack_user_data'] = self._get_user_data_json()
+ elif filter in self.fact_paths:
+ result[filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[filter])
+ return result
+
+
+ def _get_user_data_json(self):
+ try:
+ # this data comes from users; we do what we can to parse it...
+ return yaml.load(self._fetch(CS_USERDATA_BASE_URL))
+ except:
+ return None
+
+
+ def _fetch(self, path):
+ api_ip = self._get_api_ip()
+ if not api_ip:
+ return None
+ api_url = path % api_ip
+ (response, info) = fetch_url(module, api_url, force=True)
+ if response:
+ data = response.read()
+ else:
+ data = None
+ return data
+
+
+ def _get_dhcp_lease_file(self):
+ """Return the path of the lease file."""
+ default_iface = self.facts['default_ipv4']['interface']
+ dhcp_lease_file_locations = [
+ '/var/lib/dhcp/dhclient.%s.leases' % default_iface, # debian / ubuntu
+ '/var/lib/dhclient/dhclient-%s.leases' % default_iface, # centos 6
+ '/var/lib/dhclient/dhclient--%s.lease' % default_iface, # centos 7
+ '/var/db/dhclient.leases.%s' % default_iface, # openbsd
+ ]
+ for file_path in dhcp_lease_file_locations:
+ if os.path.exists(file_path):
+ return file_path
+ module.fail_json(msg="Could not find dhclient leases file.")
+
+
+ def _get_api_ip(self):
+ """Return the IP of the DHCP server."""
+ if not self.api_ip:
+ dhcp_lease_file = self._get_dhcp_lease_file()
+ for line in open(dhcp_lease_file):
+ if 'dhcp-server-identifier' in line:
+ # get IP of string "option dhcp-server-identifier 185.19.28.176;"
+ line = line.translate(None, ';')
+ self.api_ip = line.split()[2]
+ break
+ if not self.api_ip:
+ module.fail_json(msg="No dhcp-server-identifier found in leases file.")
+ return self.api_ip
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec = dict(
+ filter = dict(default=None, choices=[
+ 'cloudstack_service_offering',
+ 'cloudstack_availability_zone',
+ 'cloudstack_public_hostname',
+ 'cloudstack_public_ipv4',
+ 'cloudstack_local_hostname',
+ 'cloudstack_local_ipv4',
+ 'cloudstack_instance_id',
+ 'cloudstack_user_data',
+ ]),
+ ),
+ supports_check_mode=False
+ )
+
+ if not has_lib_yaml:
+ module.fail_json(msg="missing python library: yaml")
+
+ cs_facts = CloudStackFacts().run()
+ cs_facts_result = dict(changed=False, ansible_facts=cs_facts)
+ module.exit_json(**cs_facts_result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.facts import *
+main()
diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py
index 96b3f20f..27350eab 100644
--- a/cloud/cloudstack/cs_firewall.py
+++ b/cloud/cloudstack/cs_firewall.py
@@ -216,18 +216,12 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack):
self.firewall_rule = None
- def get_end_port(self):
- if self.module.params.get('end_port'):
- return self.module.params.get('end_port')
- return self.module.params.get('start_port')
-
-
def get_firewall_rule(self):
if not self.firewall_rule:
cidr = self.module.params.get('cidr')
protocol = self.module.params.get('protocol')
start_port = self.module.params.get('start_port')
- end_port = self.get_end_port()
+ end_port = self.get_or_fallback('end_port', 'start_port')
icmp_code = self.module.params.get('icmp_code')
icmp_type = self.module.params.get('icmp_type')
fw_type = self.module.params.get('type')
@@ -328,7 +322,7 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack):
args['cidrlist'] = self.module.params.get('cidr')
args['protocol'] = self.module.params.get('protocol')
args['startport'] = self.module.params.get('start_port')
- args['endport'] = self.get_end_port()
+ args['endport'] = self.get_or_fallback('end_port', 'start_port')
args['icmptype'] = self.module.params.get('icmp_type')
args['icmpcode'] = self.module.params.get('icmp_code')
@@ -451,11 +445,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py
index a93a5243..f8bef7c8 100644
--- a/cloud/cloudstack/cs_instance.py
+++ b/cloud/cloudstack/cs_instance.py
@@ -70,8 +70,8 @@ options:
hypervisor:
description:
- Name the hypervisor to be used for creating the new instance.
- - Relevant when using C(state=present) and option C(ISO) is used.
- - If not set, first found hypervisor will be used.
+ - Relevant when using C(state=present), but only considered if not set on ISO/template.
+ - If not set or found on ISO/template, first found hypervisor will be used.
required: false
default: null
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
@@ -355,6 +355,8 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
def __init__(self, module):
AnsibleCloudStack.__init__(self, module)
self.instance = None
+ self.template = None
+ self.iso = None
def get_service_offering_id(self):
@@ -371,7 +373,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
self.module.fail_json(msg="Service offering '%s' not found" % service_offering)
- def get_template_or_iso_id(self):
+ def get_template_or_iso(self, key=None):
template = self.module.params.get('template')
iso = self.module.params.get('iso')
@@ -382,27 +384,35 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
self.module.fail_json(msg="Template are ISO are mutually exclusive.")
args = {}
- args['account'] = self.get_account('name')
- args['domainid'] = self.get_domain('id')
- args['projectid'] = self.get_project('id')
- args['zoneid'] = self.get_zone('id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+ args['isrecursive'] = True
if template:
+ if self.template:
+ return self._get_by_key(key, self.template)
+
args['templatefilter'] = 'executable'
templates = self.cs.listTemplates(**args)
if templates:
for t in templates['template']:
if template in [ t['displaytext'], t['name'], t['id'] ]:
- return t['id']
+ self.template = t
+ return self._get_by_key(key, self.template)
self.module.fail_json(msg="Template '%s' not found" % template)
elif iso:
+ if self.iso:
+ return self._get_by_key(key, self.iso)
args['isofilter'] = 'executable'
isos = self.cs.listIsos(**args)
if isos:
for i in isos['iso']:
if iso in [ i['displaytext'], i['name'], i['id'] ]:
- return i['id']
+ self.iso = i
+ return self._get_by_key(key, self.iso)
self.module.fail_json(msg="ISO '%s' not found" % iso)
@@ -412,10 +422,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
if not disk_offering:
return None
- args = {}
- args['domainid'] = self.get_domain('id')
-
- disk_offerings = self.cs.listDiskOfferings(**args)
+ disk_offerings = self.cs.listDiskOfferings()
if disk_offerings:
for d in disk_offerings['diskoffering']:
if disk_offering in [ d['displaytext'], d['name'], d['id'] ]:
@@ -429,11 +436,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
instance_name = self.module.params.get('name')
args = {}
- args['account'] = self.get_account('name')
- args['domainid'] = self.get_domain('id')
- args['projectid'] = self.get_project('id')
- args['zoneid'] = self.get_zone('id')
-
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ # Do not pass zoneid, as the instance name must be unique across zones.
instances = self.cs.listVirtualMachines(**args)
if instances:
for v in instances['virtualmachine']:
@@ -449,10 +455,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
return None
args = {}
- args['account'] = self.get_account('name')
- args['domainid'] = self.get_domain('id')
- args['projectid'] = self.get_project('id')
- args['zoneid'] = self.get_zone('id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['zoneid'] = self.get_zone(key='id')
networks = self.cs.listNetworks(**args)
if not networks:
@@ -479,8 +485,10 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
instance = self.deploy_instance()
else:
instance = self.update_instance(instance)
-
- instance = self.ensure_tags(resource=instance, resource_type='UserVm')
+
+ # In check mode, we do not necessarily have an instance
+ if instance:
+ instance = self.ensure_tags(resource=instance, resource_type='UserVm')
return instance
@@ -492,37 +500,34 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
return user_data
- def get_display_name(self):
- display_name = self.module.params.get('display_name')
- if not display_name:
- display_name = self.module.params.get('name')
- return display_name
-
-
def deploy_instance(self):
self.result['changed'] = True
args = {}
- args['templateid'] = self.get_template_or_iso_id()
- args['zoneid'] = self.get_zone('id')
+ args['templateid'] = self.get_template_or_iso(key='id')
+ args['zoneid'] = self.get_zone(key='id')
args['serviceofferingid'] = self.get_service_offering_id()
- args['account'] = self.get_account('name')
- args['domainid'] = self.get_domain('id')
- args['projectid'] = self.get_project('id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
args['diskofferingid'] = self.get_disk_offering_id()
args['networkids'] = self.get_network_ids()
- args['hypervisor'] = self.get_hypervisor()
args['userdata'] = self.get_user_data()
args['keyboard'] = self.module.params.get('keyboard')
args['ipaddress'] = self.module.params.get('ip_address')
args['ip6address'] = self.module.params.get('ip6_address')
args['name'] = self.module.params.get('name')
+ args['displayname'] = self.get_or_fallback('display_name', 'name')
args['group'] = self.module.params.get('group')
args['keypair'] = self.module.params.get('ssh_key')
args['size'] = self.module.params.get('disk_size')
args['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups'))
+ template_iso = self.get_template_or_iso()
+ if 'hypervisor' not in template_iso:
+ args['hypervisor'] = self.get_hypervisor()
+
instance = None
if not self.module.check_mode:
instance = self.cs.deployVirtualMachine(**args)
@@ -544,14 +549,14 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
args_instance_update = {}
args_instance_update['id'] = instance['id']
args_instance_update['group'] = self.module.params.get('group')
- args_instance_update['displayname'] = self.get_display_name()
+ args_instance_update['displayname'] = self.get_or_fallback('display_name', 'name')
args_instance_update['userdata'] = self.get_user_data()
- args_instance_update['ostypeid'] = self.get_os_type('id')
+ args_instance_update['ostypeid'] = self.get_os_type(key='id')
args_ssh_key = {}
args_ssh_key['id'] = instance['id']
args_ssh_key['keypair'] = self.module.params.get('ssh_key')
- args_ssh_key['projectid'] = self.get_project('id')
+ args_ssh_key['projectid'] = self.get_project(key='id')
if self._has_changed(args_service_offering, instance) or \
self._has_changed(args_instance_update, instance) or \
@@ -624,7 +629,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
self.result['changed'] = True
if not self.module.check_mode:
- res = self.cs.expungeVirtualMachine(id=instance['id'])
+ res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)
elif instance['state'].lower() not in [ 'expunging' ]:
self.result['changed'] = True
@@ -636,7 +641,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
poll_async = self.module.params.get('poll_async')
if poll_async:
- instance = self._poll_job(res, 'virtualmachine')
+ res = self._poll_job(res, 'virtualmachine')
return instance
@@ -843,11 +848,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py
index 478748ae..7280ceff 100644
--- a/cloud/cloudstack/cs_instancegroup.py
+++ b/cloud/cloudstack/cs_instancegroup.py
@@ -223,11 +223,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py
index e3ba322f..67e4b283 100644
--- a/cloud/cloudstack/cs_iso.py
+++ b/cloud/cloudstack/cs_iso.py
@@ -354,11 +354,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py
index b602b345..c4fd51b7 100644
--- a/cloud/cloudstack/cs_network.py
+++ b/cloud/cloudstack/cs_network.py
@@ -335,13 +335,6 @@ class AnsibleCloudStackNetwork(AnsibleCloudStack):
self.network = None
- def get_or_fallback(self, key=None, fallback_key=None):
- value = self.module.params.get(key)
- if not value:
- value = self.module.params.get(fallback_key)
- return value
-
-
def get_vpc(self, key=None):
vpc = self.module.params.get('vpc')
if not vpc:
@@ -380,7 +373,7 @@ class AnsibleCloudStackNetwork(AnsibleCloudStack):
def _get_args(self):
args = {}
args['name'] = self.module.params.get('name')
- args['displaytext'] = self.get_or_fallback('displaytext','name')
+ args['displaytext'] = self.get_or_fallback('displaytext', 'name')
args['networkdomain'] = self.module.params.get('network_domain')
args['networkofferingid'] = self.get_network_offering(key='id')
return args
@@ -627,11 +620,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py
index 3b88ca85..d1b8db4d 100644
--- a/cloud/cloudstack/cs_portforward.py
+++ b/cloud/cloudstack/cs_portforward.py
@@ -217,18 +217,6 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
self.vm_default_nic = None
- def get_public_end_port(self):
- if not self.module.params.get('public_end_port'):
- return self.module.params.get('public_port')
- return self.module.params.get('public_end_port')
-
-
- def get_private_end_port(self):
- if not self.module.params.get('private_end_port'):
- return self.module.params.get('private_port')
- return self.module.params.get('private_end_port')
-
-
def get_vm_guest_ip(self):
vm_guest_ip = self.module.params.get('vm_guest_ip')
default_nic = self.get_vm_default_nic()
@@ -259,9 +247,9 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
- public_end_port = self.get_public_end_port()
+ public_end_port = self.get_or_fallback('public_end_port', 'public_port')
private_port = self.module.params.get('private_port')
- private_end_port = self.get_public_end_port()
+ private_end_port = self.get_or_fallback('private_end_port', 'private_port')
args = {}
args['ipaddressid'] = self.get_ip_address(key='id')
@@ -290,9 +278,9 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
- args['publicendport'] = self.get_public_end_port()
+ args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
- args['privateendport'] = self.get_private_end_port()
+ args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['openfirewall'] = self.module.params.get('open_firewall')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
@@ -312,9 +300,9 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
- args['publicendport'] = self.get_public_end_port()
+ args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
- args['privateendport'] = self.get_private_end_port()
+ args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['openfirewall'] = self.module.params.get('open_firewall')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
@@ -427,11 +415,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py
index 0f391bc5..896232f3 100644
--- a/cloud/cloudstack/cs_project.py
+++ b/cloud/cloudstack/cs_project.py
@@ -148,13 +148,6 @@ class AnsibleCloudStackProject(AnsibleCloudStack):
self.project = None
- def get_displaytext(self):
- displaytext = self.module.params.get('displaytext')
- if not displaytext:
- displaytext = self.module.params.get('name')
- return displaytext
-
-
def get_project(self):
if not self.project:
project = self.module.params.get('name')
@@ -184,7 +177,7 @@ class AnsibleCloudStackProject(AnsibleCloudStack):
def update_project(self, project):
args = {}
args['id'] = project['id']
- args['displaytext'] = self.get_displaytext()
+ args['displaytext'] = self.get_or_fallback('displaytext', 'name')
if self._has_changed(args, project):
self.result['changed'] = True
@@ -205,7 +198,7 @@ class AnsibleCloudStackProject(AnsibleCloudStack):
args = {}
args['name'] = self.module.params.get('name')
- args['displaytext'] = self.get_displaytext()
+ args['displaytext'] = self.get_or_fallback('displaytext', 'name')
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
@@ -332,11 +325,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py
index 54a71686..a6827f6f 100644
--- a/cloud/cloudstack/cs_securitygroup.py
+++ b/cloud/cloudstack/cs_securitygroup.py
@@ -190,11 +190,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py
index e943e7d1..65bd7fd5 100644
--- a/cloud/cloudstack/cs_securitygroup_rule.py
+++ b/cloud/cloudstack/cs_securitygroup_rule.py
@@ -222,18 +222,12 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
and cidr == rule['cidr']
- def get_end_port(self):
- if self.module.params.get('end_port'):
- return self.module.params.get('end_port')
- return self.module.params.get('start_port')
-
-
def _get_rule(self, rules):
user_security_group_name = self.module.params.get('user_security_group')
cidr = self.module.params.get('cidr')
protocol = self.module.params.get('protocol')
start_port = self.module.params.get('start_port')
- end_port = self.get_end_port()
+ end_port = self.get_or_fallback('end_port', 'start_port')
icmp_code = self.module.params.get('icmp_code')
icmp_type = self.module.params.get('icmp_type')
@@ -291,7 +285,7 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
args['protocol'] = self.module.params.get('protocol')
args['startport'] = self.module.params.get('start_port')
- args['endport'] = self.get_end_port()
+ args['endport'] = self.get_or_fallback('end_port', 'start_port')
args['icmptype'] = self.module.params.get('icmp_type')
args['icmpcode'] = self.module.params.get('icmp_code')
args['projectid'] = self.get_project('id')
@@ -429,11 +423,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py
index 180e96ca..28c6b380 100644
--- a/cloud/cloudstack/cs_sshkeypair.py
+++ b/cloud/cloudstack/cs_sshkeypair.py
@@ -249,11 +249,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_staticnat.py b/cloud/cloudstack/cs_staticnat.py
new file mode 100644
index 00000000..5761a399
--- /dev/null
+++ b/cloud/cloudstack/cs_staticnat.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: cs_staticnat
+short_description: Manages static NATs on Apache CloudStack based clouds.
+description:
+ - Create, update and remove static NATs.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ ip_address:
+ description:
+ - Public IP address the static NAT is assigned to.
+ required: true
+ vm:
+ description:
+ - Name of the virtual machine the static NAT is created for.
+ - Required if C(state=present).
+ required: false
+ default: null
+ vm_guest_ip:
+ description:
+ - VM guest NIC secondary IP address for the static NAT.
+ required: false
+ default: false
+ state:
+ description:
+ - State of the static NAT.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the static NAT is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the static NAT is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the static NAT is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the virtual machine is in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create a static NAT: 1.2.3.4 -> web01
+- local_action:
+ module: cs_staticnat
+ ip_address: 1.2.3.4
+ vm: web01
+
+# remove a static NAT
+- local_action:
+ module: cs_staticnat
+ ip_address: 1.2.3.4
+ state: absent
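+
+# create a static NAT to a secondary IP of the VM's default NIC
+# (illustrative only; the guest IP below is a placeholder)
+- local_action:
+ module: cs_staticnat
+ ip_address: 1.2.3.4
+ vm: web01
+ vm_guest_ip: 10.101.65.152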
+'''
+
+RETURN = '''
+---
+ip_address:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+vm_name:
+ description: Name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_display_name:
+ description: Display name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_guest_ip:
+ description: IP of the virtual machine.
+ returned: success
+ type: string
+ sample: 10.101.65.152
+zone:
+ description: Name of zone the static NAT is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the static NAT is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the static NAT is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the static NAT is related to.
+ returned: success
+ type: string
+ sample: example domain
+'''
+
+
+try:
+ from cs import CloudStack, CloudStackException, read_config
+ has_lib_cs = True
+except ImportError:
+ has_lib_cs = False
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackStaticNat(AnsibleCloudStack):
+
+ def __init__(self, module):
+ AnsibleCloudStack.__init__(self, module)
+ self.vm_default_nic = None
+
+
+# TODO: move it to cloudstack utils, also used in cs_portforward
+ def get_vm_guest_ip(self):
+ vm_guest_ip = self.module.params.get('vm_guest_ip')
+ default_nic = self.get_vm_default_nic()
+
+ if not vm_guest_ip:
+ return default_nic['ipaddress']
+
+ for secondary_ip in default_nic['secondaryip']:
+ if vm_guest_ip == secondary_ip['ipaddress']:
+ return vm_guest_ip
+ self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
+
+
+# TODO: move it to cloudstack utils, also used in cs_portforward
+ def get_vm_default_nic(self):
+ if self.vm_default_nic:
+ return self.vm_default_nic
+
+ nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id'))
+ if nics:
+ for n in nics['nic']:
+ if n['isdefault']:
+ self.vm_default_nic = n
+ return self.vm_default_nic
+ self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
+
+
+ def create_static_nat(self, ip_address):
+ self.result['changed'] = True
+ args = {}
+ args['virtualmachineid'] = self.get_vm(key='id')
+ args['ipaddressid'] = ip_address['id']
+ args['vmguestip'] = self.get_vm_guest_ip()
+ if not self.module.check_mode:
+ res = self.cs.enableStaticNat(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ # reset ip address and query new values
+ self.ip_address = None
+ ip_address = self.get_ip_address()
+ return ip_address
+
+
+ def update_static_nat(self, ip_address):
+ args = {}
+ args['virtualmachineid'] = self.get_vm(key='id')
+ args['ipaddressid'] = ip_address['id']
+ args['vmguestip'] = self.get_vm_guest_ip()
+
+ # make an alias, so we can use _has_changed()
+ ip_address['vmguestip'] = ip_address['vmipaddress']
+ if self._has_changed(args, ip_address):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.disableStaticNat(ipaddressid=ip_address['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ res = self._poll_job(res, 'staticnat')
+ res = self.cs.enableStaticNat(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ # reset ip address and query new values
+ self.ip_address = None
+ ip_address = self.get_ip_address()
+ return ip_address
+
+
+ def present_static_nat(self):
+ ip_address = self.get_ip_address()
+ if not ip_address['isstaticnat']:
+ ip_address = self.create_static_nat(ip_address)
+ else:
+ ip_address = self.update_static_nat(ip_address)
+ return ip_address
+
+
+ def absent_static_nat(self):
+ ip_address = self.get_ip_address()
+ if ip_address['isstaticnat']:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.disableStaticNat(ipaddressid=ip_address['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self._poll_job(res, 'staticnat')
+ return ip_address
+
+
+ def get_result(self, ip_address):
+ if ip_address:
+ if 'zonename' in ip_address:
+ self.result['zone'] = ip_address['zonename']
+ if 'domain' in ip_address:
+ self.result['domain'] = ip_address['domain']
+ if 'account' in ip_address:
+ self.result['account'] = ip_address['account']
+ if 'project' in ip_address:
+ self.result['project'] = ip_address['project']
+ if 'virtualmachinedisplayname' in ip_address:
+ self.result['vm_display_name'] = ip_address['virtualmachinedisplayname']
+ if 'virtualmachinename' in ip_address:
+ self.result['vm'] = ip_address['virtualmachinename']
+ if 'vmipaddress' in ip_address:
+ self.result['vm_guest_ip'] = ip_address['vmipaddress']
+ if 'ipaddress' in ip_address:
+ self.result['ip_address'] = ip_address['ipaddress']
+ return self.result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ ip_address = dict(required=True),
+ vm = dict(default=None),
+ vm_guest_ip = dict(default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(choices=BOOLEANS, default=True),
+ api_key = dict(default=None),
+ api_secret = dict(default=None, no_log=True),
+ api_url = dict(default=None),
+ api_http_method = dict(choices=['get', 'post'], default='get'),
+ api_timeout = dict(type='int', default=10),
+ ),
+ required_together = (
+ ['api_key', 'api_secret', 'api_url'],
+ ),
+ supports_check_mode=True
+ )
+
+ if not has_lib_cs:
+ module.fail_json(msg="python library cs required: pip install cs")
+
+ try:
+ acs_static_nat = AnsibleCloudStackStaticNat(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ ip_address = acs_static_nat.absent_static_nat()
+ else:
+ ip_address = acs_static_nat.present_static_nat()
+
+ result = acs_static_nat.get_result(ip_address)
+
+ except CloudStackException, e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_template.py b/cloud/cloudstack/cs_template.py
index 1cd245d2..8e56aafa 100644
--- a/cloud/cloudstack/cs_template.py
+++ b/cloud/cloudstack/cs_template.py
@@ -623,11 +623,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py
index 24e8a46f..62dec7ca 100644
--- a/cloud/cloudstack/cs_vmsnapshot.py
+++ b/cloud/cloudstack/cs_vmsnapshot.py
@@ -317,11 +317,9 @@ def main():
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
- except Exception, e:
- module.fail_json(msg='Exception: %s' % str(e))
-
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py
index e6d70f4e..bf5fcf3c 100644
--- a/cloud/lxc/lxc_container.py
+++ b/cloud/lxc/lxc_container.py
@@ -385,6 +385,8 @@ try:
import lxc
except ImportError:
HAS_LXC = False
+else:
+ HAS_LXC = True
# LXC_COMPRESSION_MAP is a map of available compression types when creating
@@ -708,7 +710,7 @@ class LxcContainerManagement(object):
for option_line in container_config:
# Look for key in config
if option_line.startswith(key):
- _, _value = option_line.split('=')
+ _, _value = option_line.split('=', 1)
config_value = ' '.join(_value.split())
line_index = container_config.index(option_line)
# If the sanitized values don't match replace them
diff --git a/cloud/misc/virt.py b/cloud/misc/virt.py
index 80b8e255..b59c7ed3 100644
--- a/cloud/misc/virt.py
+++ b/cloud/misc/virt.py
@@ -93,8 +93,9 @@ import sys
try:
import libvirt
except ImportError:
- print "failed=True msg='libvirt python module unavailable'"
- sys.exit(1)
+ HAS_VIRT = False
+else:
+ HAS_VIRT = True
ALL_COMMANDS = []
VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
@@ -481,6 +482,11 @@ def main():
xml = dict(),
))
+ if not HAS_VIRT:
+ module.fail_json(
+ msg='The `libvirt` module is not importable. Check the requirements.'
+ )
+
rc = VIRT_SUCCESS
try:
rc, result = core(module)
diff --git a/cloud/rackspace/__init__.py b/cloud/rackspace/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloud/rackspace/__init__.py
diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py
new file mode 100644
index 00000000..2013b8c4
--- /dev/null
+++ b/cloud/rackspace/rax_clb_ssl.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION='''
+module: rax_clb_ssl
+short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
+description:
+- Set up, reconfigure, or remove SSL termination for an existing load balancer.
+version_added: "2.0"
+options:
+ loadbalancer:
+ description:
+ - Name or ID of the load balancer on which to manage SSL termination.
+ required: true
+ state:
+ description:
+ - If set to "present", SSL termination will be added to this load balancer.
+ - If "absent", SSL termination will be removed instead.
+ choices:
+ - present
+ - absent
+ default: present
+ enabled:
+ description:
+ - If set to "false", temporarily disable SSL termination without discarding
+ - existing credentials.
+ default: true
+ private_key:
+ description:
+ - The private SSL key as a string in PEM format.
+ certificate:
+ description:
+ - The public SSL certificates as a string in PEM format.
+ intermediate_certificate:
+ description:
+ - One or more intermediate certificate authorities as a string in PEM
+ - format, concatenated into a single string.
+ secure_port:
+ description:
+ - The port to listen for secure traffic.
+ default: 443
+ secure_traffic_only:
+ description:
+ - If "true", the load balancer will *only* accept secure traffic.
+ default: false
+ https_redirect:
+ description:
+ - If "true", the load balancer will redirect HTTP traffic to HTTPS.
+ - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
+ - termination is also applied or removed.
+ wait:
+ description:
+ - Wait for the balancer to be in state "running" before returning.
+ default: false
+ wait_timeout:
+ description:
+ - How long before "wait" gives up, in seconds.
+ default: 300
+author: Ash Wilson
+extends_documentation_fragment: rackspace
+'''
+
+EXAMPLES = '''
+- name: Enable SSL termination on a load balancer
+ rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
+ intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
+ secure_traffic_only: true
+ wait: true
+
+- name: Disable SSL termination
+ rax_clb_ssl:
+ loadbalancer: "{{ registered_lb.balancer.id }}"
+ state: absent
+ wait: true
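+
+# Illustrative only: enable the https_redirect option, which requires
+# secure_traffic_only to be true; certificate files are the same placeholders
+# used in the first example.
+- name: Redirect plain HTTP to HTTPS
+ rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
+ secure_traffic_only: true
+ https_redirect: true
+ wait: true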
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
+ certificate, intermediate_certificate, secure_port,
+ secure_traffic_only, https_redirect,
+ wait, wait_timeout):
+ # Validate arguments.
+
+ if state == 'present':
+ if not private_key:
+ module.fail_json(msg="private_key must be provided.")
+ else:
+ private_key = private_key.strip()
+
+ if not certificate:
+ module.fail_json(msg="certificate must be provided.")
+ else:
+ certificate = certificate.strip()
+
+ attempts = wait_timeout / 5
+
+ # Locate the load balancer.
+
+ balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ existing_ssl = balancer.get_ssl_termination()
+
+ changed = False
+
+ if state == 'present':
+ # Apply or reconfigure SSL termination on the load balancer.
+ ssl_attrs = dict(
+ securePort=secure_port,
+ privatekey=private_key,
+ certificate=certificate,
+ intermediateCertificate=intermediate_certificate,
+ enabled=enabled,
+ secureTrafficOnly=secure_traffic_only
+ )
+
+ needs_change = False
+
+ if existing_ssl:
+ for ssl_attr, value in ssl_attrs.iteritems():
+ if ssl_attr == 'privatekey':
+ # The private key is not included in get_ssl_termination's
+ # output (as it shouldn't be). Also, if you're changing the
+ # private key, you'll also be changing the certificate,
+ # so we don't lose anything by not checking it.
+ continue
+
+ if value is not None and existing_ssl.get(ssl_attr) != value:
+ # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr))
+ needs_change = True
+ else:
+ needs_change = True
+
+ if needs_change:
+ try:
+ balancer.add_ssl_termination(**ssl_attrs)
+ except pyrax.exceptions.PyraxException, e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+ elif state == 'absent':
+ # Remove SSL termination if it's already configured.
+ if existing_ssl:
+ try:
+ balancer.delete_ssl_termination()
+ except pyrax.exceptions.PyraxException, e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if https_redirect is not None and balancer.httpsRedirect != https_redirect:
+ if changed:
+ # This wait is unavoidable because load balancers are immutable
+ # while the SSL termination changes above are being applied.
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ try:
+ balancer.update(httpsRedirect=https_redirect)
+ except pyrax.exceptions.PyraxException, e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if changed and wait:
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ new_ssl_termination = balancer.get_ssl_termination()
+
+ # Intentionally omit the private key from the module output (so it isn't
+ # accidentally echoed by `ansible-playbook -v` or `debug`) as well as the
+ # certificate, which is simply long. Convert other attributes to snake_case
+ # and include https_redirect at the top level.
+ if new_ssl_termination:
+ new_ssl = dict(
+ enabled=new_ssl_termination['enabled'],
+ secure_port=new_ssl_termination['securePort'],
+ secure_traffic_only=new_ssl_termination['secureTrafficOnly']
+ )
+ else:
+ new_ssl = None
+
+ result = dict(
+ changed=changed,
+ https_redirect=balancer.httpsRedirect,
+ ssl_termination=new_ssl,
+ balancer=rax_to_dict(balancer, 'clb')
+ )
+ success = True
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ success = False
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+ success = False
+
+ if success:
+ module.exit_json(**result)
+ else:
+ module.fail_json(**result)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(dict(
+ loadbalancer=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(type='bool', default=True),
+ private_key=dict(),
+ certificate=dict(),
+ intermediate_certificate=dict(),
+ secure_port=dict(type='int', default=443),
+ secure_traffic_only=dict(type='bool', default=False),
+ https_redirect=dict(type='bool'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module.')
+
+ loadbalancer = module.params.get('loadbalancer')
+ state = module.params.get('state')
+ enabled = module.boolean(module.params.get('enabled'))
+ private_key = module.params.get('private_key')
+ certificate = module.params.get('certificate')
+ intermediate_certificate = module.params.get('intermediate_certificate')
+ secure_port = module.params.get('secure_port')
+ secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
+ https_redirect = module.boolean(module.params.get('https_redirect'))
+ wait = module.boolean(module.params.get('wait'))
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer_ssl(
+ module, loadbalancer, state, enabled, private_key, certificate,
+ intermediate_certificate, secure_port, secure_traffic_only,
+ https_redirect, wait, wait_timeout
+ )
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+main()
diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py
new file mode 100644
index 00000000..a3f29e22
--- /dev/null
+++ b/cloud/rackspace/rax_mon_alarm.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_alarm
+short_description: Create or delete a Rackspace Cloud Monitoring alarm.
+description:
+- Create or delete a Rackspace Cloud Monitoring alarm that associates an
+ existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
+ criteria that specify what conditions will trigger which levels of
+ notifications. Rackspace monitoring module flow | rax_mon_entity ->
+ rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
+ *rax_mon_alarm*
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that the alarm with this C(label) exists or does not exist.
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ label:
+ description:
+ - Friendly name for this alarm, used to achieve idempotence. Must be a String
+ between 1 and 255 characters long.
+ required: true
+ entity_id:
+ description:
+ - ID of the entity this alarm is attached to. May be acquired by registering
+ the value of a rax_mon_entity task.
+ required: true
+ check_id:
+ description:
+ - ID of the check that should be alerted on. May be acquired by registering
+ the value of a rax_mon_check task.
+ required: true
+ notification_plan_id:
+ description:
+ - ID of the notification plan to trigger if this alarm fires. May be acquired
+ by registering the value of a rax_mon_notification_plan task.
+ required: true
+ criteria:
+ description:
+ - Alarm DSL that describes alerting conditions and their output states. Must
+ be between 1 and 16384 characters long. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
+ for a reference on the alerting language.
+ disabled:
+ description:
+ - If yes, create this alarm, but leave it in an inactive state. Defaults to
+ no.
+ choices: [ "yes", "no" ]
+ metadata:
+ description:
+ - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
+ keys and values between 1 and 255 characters long.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Alarm example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure that a specific alarm exists.
+ rax_mon_alarm:
+ credentials: ~/.rax_pub
+ state: present
+ label: uhoh
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ check_id: "{{ the_check['check']['id'] }}"
+ notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
+ criteria: >
+ if (rate(metric['average']) > 10) {
+ return new AlarmStatus(WARNING);
+ }
+ return new AlarmStatus(OK);
+ register: the_alarm
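+
+ # An illustrative follow-on task (IDs are placeholders): the module also
+ # accepts state: absent to remove the alarm; entity_id, check_id, and
+ # notification_plan_id remain required by the argument spec.
+ - name: Ensure the alarm is removed.
+ rax_mon_alarm:
+ credentials: ~/.rax_pub
+ state: absent
+ label: uhoh
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ check_id: "{{ the_check['check']['id'] }}"
+ notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"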
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
+ disabled, metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ if criteria and (len(criteria) < 1 or len(criteria) > 16384):
+ module.fail_json(msg='criteria must be between 1 and 16384 characters long')
+
+ # Coerce attributes.
+
+ changed = False
+ alarm = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
+
+ if existing:
+ alarm = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_update = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing alarms have the label %s.' %
+ (len(existing), label))
+
+ if alarm:
+ if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
+ should_delete = should_create = True
+
+ should_update = (disabled and disabled != alarm.disabled) or \
+ (metadata and metadata != alarm.metadata) or \
+ (criteria and criteria != alarm.criteria)
+
+ if should_update and not should_delete:
+ cm.update_alarm(entity=entity_id, alarm=alarm,
+ criteria=criteria, disabled=disabled,
+ label=label, metadata=metadata)
+ changed = True
+
+ if should_delete:
+ alarm.delete()
+ changed = True
+ else:
+ should_create = True
+
+ if should_create:
+ alarm = cm.create_alarm(entity=entity_id, check=check_id,
+ notification_plan=notification_plan_id,
+ criteria=criteria, disabled=disabled, label=label,
+ metadata=metadata)
+ changed = True
+ else:
+ for a in existing:
+ a.delete()
+ changed = True
+
+ if alarm:
+ alarm_dict = {
+ "id": alarm.id,
+ "label": alarm.label,
+ "check_id": alarm.check_id,
+ "notification_plan_id": alarm.notification_plan_id,
+ "criteria": alarm.criteria,
+ "disabled": alarm.disabled,
+ "metadata": alarm.metadata
+ }
+ module.exit_json(changed=changed, alarm=alarm_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ entity_id=dict(required=True),
+ check_id=dict(required=True),
+ notification_plan_id=dict(required=True),
+ criteria=dict(),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ entity_id = module.params.get('entity_id')
+ check_id = module.params.get('check_id')
+ notification_plan_id = module.params.get('notification_plan_id')
+ criteria = module.params.get('criteria')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ alarm(module, state, label, entity_id, check_id, notification_plan_id,
+ criteria, disabled, metadata)
+
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py
new file mode 100644
index 00000000..14b86864
--- /dev/null
+++ b/cloud/rackspace/rax_mon_check.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_check
+short_description: Create or delete a Rackspace Cloud Monitoring check for an
+ existing entity.
+description:
+- Create or delete a Rackspace Cloud Monitoring check associated with an
+ existing rax_mon_entity. A check is a specific test or measurement that is
+ performed, possibly from different monitoring zones, on the systems you
+ monitor. Rackspace monitoring module flow | rax_mon_entity ->
+ *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
+ rax_mon_alarm
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that a check with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ entity_id:
+ description:
+ - ID of the rax_mon_entity to target with this check.
+ required: true
+ label:
+ description:
+ - Defines a label for this check, between 1 and 64 characters long.
+ required: true
+ check_type:
+ description:
+ - The type of check to create. C(remote.) checks may be created on any
+ rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
+ that have a non-null C(agent_id).
+ choices:
+ - remote.dns
+ - remote.ftp-banner
+ - remote.http
+ - remote.imap-banner
+ - remote.mssql-banner
+ - remote.mysql-banner
+ - remote.ping
+ - remote.pop3-banner
+ - remote.postgresql-banner
+ - remote.smtp-banner
+ - remote.smtp
+ - remote.ssh
+ - remote.tcp
+ - remote.telnet-banner
+ - agent.filesystem
+ - agent.memory
+ - agent.load_average
+ - agent.cpu
+ - agent.disk
+ - agent.network
+ - agent.plugin
+ required: true
+ monitoring_zones_poll:
+ description:
+ - Comma-separated list of the names of the monitoring zones the check should
+ run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
+ mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
+ target_hostname:
+ description:
+ - One of `target_hostname` and `target_alias` is required for remote.* checks,
+ but prohibited for agent.* checks. The hostname this check should target.
+ Must be a valid IPv4 address, IPv6 address, or FQDN.
+ target_alias:
+ description:
+ - One of `target_alias` and `target_hostname` is required for remote.* checks,
+ but prohibited for agent.* checks. Use the corresponding key in the entity's
+ `ip_addresses` hash to resolve an IP address to target.
+ details:
+ description:
+ - Additional details specific to the check type. Must be a hash of strings
+ between 1 and 255 characters long, or an array or object containing 0 to
+ 256 items.
+ disabled:
+ description:
+ - If "yes", ensure the check is created, but don't actually use it yet.
+ choices: [ "yes", "no" ]
+ metadata:
+ description:
+ - Hash of arbitrary key-value pairs to accompany this check if it fires.
+ Keys and values must be strings between 1 and 255 characters long.
+ period:
+ description:
+ - The number of seconds between each time the check is performed. Must be
+ greater than the minimum period set on your account.
+ timeout:
+ description:
+ - The number of seconds this check will wait when attempting to collect
+ results. Must be less than the period.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring check
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Associate a check with an existing entity.
+ rax_mon_check:
+ credentials: ~/.rax_pub
+ state: present
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_check
+ check_type: remote.ping
+ monitoring_zones_poll: mziad,mzord,mzdfw
+ details:
+ count: 10
+ metadata:
+ hurf: durf
+ register: the_check
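+
+ # An illustrative follow-on task: remove the same check by label.
+ # entity_id, label, and check_type stay required even with state: absent.
+ - name: Ensure the check is absent.
+ rax_mon_check:
+ credentials: ~/.rax_pub
+ state: absent
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_check
+ check_type: remote.ping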
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout):
+
+ # Coerce attributes.
+
+ if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
+ monitoring_zones_poll = [monitoring_zones_poll]
+
+ if period:
+ period = int(period)
+
+ if timeout:
+ timeout = int(timeout)
+
+ changed = False
+ check = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ entity = cm.get_entity(entity_id)
+ if not entity:
+ module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
+ ' a valid entity id.' % entity_id)
+
+ existing = [e for e in entity.list_checks() if e.label == label]
+
+ if existing:
+ check = existing[0]
+
+ if state == 'present':
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing checks have a label of %s.' %
+ (len(existing), label))
+
+ should_delete = False
+ should_create = False
+ should_update = False
+
+ if check:
+ # Details may include keys set to default values that are not
+ # included in the initial creation.
+ #
+ # Only force a recreation of the check if one of the *specified*
+ # keys is missing or has a different value.
+ if details:
+ for (key, value) in details.iteritems():
+ if key not in check.details:
+ should_delete = should_create = True
+ elif value != check.details[key]:
+ should_delete = should_create = True
+
+ should_update = label != check.label or \
+ (target_hostname and target_hostname != check.target_hostname) or \
+ (target_alias and target_alias != check.target_alias) or \
+ (disabled != check.disabled) or \
+ (metadata and metadata != check.metadata) or \
+ (period and period != check.period) or \
+ (timeout and timeout != check.timeout) or \
+ (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
+
+ if should_update and not should_delete:
+ check.update(label=label,
+ disabled=disabled,
+ metadata=metadata,
+ monitoring_zones_poll=monitoring_zones_poll,
+ timeout=timeout,
+ period=period,
+ target_alias=target_alias,
+ target_hostname=target_hostname)
+ changed = True
+ else:
+ # The check doesn't exist yet.
+ should_create = True
+
+ if should_delete:
+ check.delete()
+
+ if should_create:
+ check = cm.create_check(entity,
+ label=label,
+ check_type=check_type,
+ target_hostname=target_hostname,
+ target_alias=target_alias,
+ monitoring_zones_poll=monitoring_zones_poll,
+ details=details,
+ disabled=disabled,
+ metadata=metadata,
+ period=period,
+ timeout=timeout)
+ changed = True
+ elif state == 'absent':
+ if check:
+ check.delete()
+ changed = True
+ else:
+ module.fail_json(msg='state must be either present or absent.')
+
+ if check:
+ check_dict = {
+ "id": check.id,
+ "label": check.label,
+ "type": check.type,
+ "target_hostname": check.target_hostname,
+ "target_alias": check.target_alias,
+ "monitoring_zones_poll": check.monitoring_zones_poll,
+ "details": check.details,
+ "disabled": check.disabled,
+ "metadata": check.metadata,
+ "period": check.period,
+ "timeout": check.timeout
+ }
+ module.exit_json(changed=changed, check=check_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ entity_id=dict(required=True),
+ label=dict(required=True),
+ check_type=dict(required=True),
+ monitoring_zones_poll=dict(),
+ target_hostname=dict(),
+ target_alias=dict(),
+ details=dict(type='dict', default={}),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict', default={}),
+ period=dict(type='int'),
+ timeout=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ entity_id = module.params.get('entity_id')
+ label = module.params.get('label')
+ check_type = module.params.get('check_type')
+ monitoring_zones_poll = module.params.get('monitoring_zones_poll')
+ target_hostname = module.params.get('target_hostname')
+ target_alias = module.params.get('target_alias')
+ details = module.params.get('details')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+ period = module.params.get('period')
+ timeout = module.params.get('timeout')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout)
+
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py
new file mode 100644
index 00000000..f5f142d2
--- /dev/null
+++ b/cloud/rackspace/rax_mon_entity.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_entity
+short_description: Create or delete a Rackspace Cloud Monitoring entity
+description:
+- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
+ to monitor. Entities associate checks and alarms with a target system and
+ provide a convenient, centralized place to store IP addresses. Rackspace
+ monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
+ rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
+version_added: "2.0"
+options:
+ label:
+ description:
+ - Defines a name for this entity. Must be a non-empty string between 1 and
+ 255 characters long.
+ required: true
+ state:
+ description:
+ - Ensure that an entity with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ agent_id:
+ description:
+ - Rackspace monitoring agent on the target device to which this entity is
+ bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
+ named_ip_addresses:
+ description:
+ - Hash of IP addresses that may be referenced by name by rax_mon_checks
+ added to this entity. Must be a dictionary with keys that are names
+ between 1 and 64 characters long, and values that are valid IPv4 or IPv6
+ addresses.
+ metadata:
+ description:
+ - Hash of arbitrary C(name), C(value) pairs that are passed to associated
+ rax_mon_alarms. Names and values must all be between 1 and 255 characters
+ long.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Entity example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure an entity exists
+ rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: present
+ label: my_entity
+ named_ip_addresses:
+ web_box: 192.168.0.10
+ db_box: 192.168.0.11
+ metadata:
+ hurf: durf
+ register: the_entity
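+
+ # An illustrative follow-on task: delete the entity by label once the
+ # device it represents is retired.
+ - name: Ensure the entity is absent
+ rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: absent
+ label: my_entity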
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
+ metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for entity in cm.list_entities():
+ if label == entity.label:
+ existing.append(entity)
+
+ entity = None
+
+ if existing:
+ entity = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing entities have the label %s.' %
+ (len(existing), label))
+
+ if entity:
+ if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
+ should_delete = should_create = True
+
+ # Change an existing Entity, unless there's nothing to do.
+ should_update = agent_id and agent_id != entity.agent_id or \
+ (metadata and metadata != entity.metadata)
+
+ if should_update and not should_delete:
+ entity.update(agent_id, metadata)
+ changed = True
+
+ if should_delete:
+ entity.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ # Create a new Entity.
+ entity = cm.create_entity(label=label, agent=agent_id,
+ ip_addresses=named_ip_addresses,
+ metadata=metadata)
+ changed = True
+ else:
+ # Delete the existing Entities.
+ for e in existing:
+ e.delete()
+ changed = True
+
+ if entity:
+ entity_dict = {
+ "id": entity.id,
+ "name": entity.name,
+ "agent_id": entity.agent_id,
+ }
+ module.exit_json(changed=changed, entity=entity_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ agent_id=dict(),
+ named_ip_addresses=dict(type='dict', default={}),
+ metadata=dict(type='dict', default={})
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ agent_id = module.params.get('agent_id')
+ named_ip_addresses = module.params.get('named_ip_addresses')
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py
new file mode 100644
index 00000000..d7b6692d
--- /dev/null
+++ b/cloud/rackspace/rax_mon_notification.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_notification
+short_description: Create or delete a Rackspace Cloud Monitoring notification.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification that specifies a
+ channel that can be used to communicate alarms, such as email, webhooks, or
+ PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that the notification with this C(label) exists or does not exist.
+ choices: ['present', 'absent']
+ label:
+ description:
+ - Defines a friendly name for this notification. String between 1 and 255
+ characters long.
+ required: true
+ notification_type:
+ description:
+ - A supported notification type.
+ choices: ["webhook", "email", "pagerduty"]
+ required: true
+ details:
+ description:
+ - Dictionary of key-value pairs used to initialize the notification.
+ Required keys and meanings vary with notification type. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
+ service-notification-types-crud.html for details.
+ required: true
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Monitoring notification example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Email me when something goes wrong.
+ rax_mon_notification:
+ credentials: ~/.rax_pub
+ label: omg
+ notification_type: email
+ details:
+ address: me@mailhost.com
+ register: the_notification
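+
+ # An illustrative sketch (the URL is a placeholder): webhook notifications
+ # take their endpoint in the details hash, commonly under a "url" key.
+ - name: Post alerts to a webhook as well.
+ rax_mon_notification:
+ credentials: ~/.rax_pub
+ label: ops_webhook
+ notification_type: webhook
+ details:
+ url: https://hooks.example.com/monitoring
+ register: the_webhook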
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def notification(module, state, label, notification_type, details):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notifications():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing notifications are labelled %s.' %
+ (len(existing), label))
+
+ if notification:
+ should_delete = (notification_type != notification.type)
+
+ should_update = (details != notification.details)
+
+ if should_update and not should_delete:
+ notification.update(details=details)
+ changed = True
+
+ if should_delete:
+ notification.delete()
+ should_create = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification = cm.create_notification(notification_type,
+ label=label, details=details)
+ changed = True
+ else:
+ for n in existing:
+ n.delete()
+ changed = True
+
+ if notification:
+ notification_dict = {
+ "id": notification.id,
+ "type": notification.type,
+ "label": notification.label,
+ "details": notification.details
+ }
+ module.exit_json(changed=changed, notification=notification_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
+ details=dict(required=True, type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ notification_type = module.params.get('notification_type')
+ details = module.params.get('details')
+
+ setup_rax_module(module, pyrax)
+
+ notification(module, state, label, notification_type, details)
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py
new file mode 100644
index 00000000..5bb3fa16
--- /dev/null
+++ b/cloud/rackspace/rax_mon_notification_plan.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_mon_notification_plan
+short_description: Create or delete a Rackspace Cloud Monitoring notification
+ plan.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification plan by
+ associating existing rax_mon_notifications with severity levels. Rackspace
+ monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that the notification plan with this C(label) exists or does not
+ exist.
+ choices: ['present', 'absent']
+ label:
+ description:
+ - Defines a friendly name for this notification plan. String between 1 and
+ 255 characters long.
+ required: true
+ critical_state:
+ description:
+ - Notification list to use when the alarm state is CRITICAL. Must be an
+ array of valid rax_mon_notification ids.
+ warning_state:
+ description:
+ - Notification list to use when the alarm state is WARNING. Must be an array
+ of valid rax_mon_notification ids.
+ ok_state:
+ description:
+ - Notification list to use when the alarm state is OK. Must be an array of
+ valid rax_mon_notification ids.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Example notification plan
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Establish who gets called when.
+ rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: present
+ label: defcon1
+ critical_state:
+ - "{{ everyone['notification']['id'] }}"
+ warning_state:
+ - "{{ opsfloor['notification']['id'] }}"
+ register: defcon1
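+
+ # An illustrative follow-on task: remove the plan by label when it is no
+ # longer needed.
+ - name: Retire the notification plan
+ rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: absent
+ label: defcon1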
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+def notification_plan(module, state, label, critical_state, warning_state, ok_state):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification_plan = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notification_plans():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification_plan = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s notification plans are labelled %s.' %
+ (len(existing), label))
+
+ if notification_plan:
+ should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
+ (warning_state and warning_state != notification_plan.warning_state) or \
+ (ok_state and ok_state != notification_plan.ok_state)
+
+ if should_delete:
+ notification_plan.delete()
+ should_create = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification_plan = cm.create_notification_plan(label=label,
+ critical_state=critical_state,
+ warning_state=warning_state,
+ ok_state=ok_state)
+ changed = True
+ else:
+ for np in existing:
+ np.delete()
+ changed = True
+
+ if notification_plan:
+ notification_plan_dict = {
+ "id": notification_plan.id,
+ "critical_state": notification_plan.critical_state,
+ "warning_state": notification_plan.warning_state,
+ "ok_state": notification_plan.ok_state,
+ "metadata": notification_plan.metadata
+ }
+ module.exit_json(changed=changed, notification_plan=notification_plan_dict)
+ else:
+ module.exit_json(changed=changed)
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ critical_state=dict(type='list'),
+ warning_state=dict(type='list'),
+ ok_state=dict(type='list')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ critical_state = module.params.get('critical_state')
+ warning_state = module.params.get('warning_state')
+ ok_state = module.params.get('ok_state')
+
+ setup_rax_module(module, pyrax)
+
+ notification_plan(module, state, label, critical_state, warning_state, ok_state)
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+main()
diff --git a/cloud/vmware/vsphere_copy b/cloud/vmware/vsphere_copy.py
index f85beab4..4364e8b5 100644
--- a/cloud/vmware/vsphere_copy
+++ b/cloud/vmware/vsphere_copy.py
@@ -55,8 +55,8 @@ options:
- The file to push to the datastore on the vCenter server.
required: true
notes:
- - This module ought to be run from a system that can access vCenter directly and has the file to transfer.
- It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to).
+ - "This module ought to be run from a system that can access vCenter directly and has the file to transfer.
+ It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
- Tested on vSphere 5.5
'''
@@ -78,6 +78,9 @@ import socket
def vmware_path(datastore, datacenter, path):
''' Constructs a URL path that VSphere accepts reliably '''
path = "/folder/%s" % path.lstrip("/")
+ # Due to a software bug in vSphere, it fails to handle ampersand in datacenter names
+ # The solution is to do what vSphere does (when browsing) and double-encode ampersands, and possibly other characters.
+ datacenter = datacenter.replace('&', '%26')
if not path.startswith("/"):
path = "/" + path
params = dict( dsName = datastore )
@@ -120,11 +123,10 @@ def main():
atexit.register(conn.close)
remote_path = vmware_path(datastore, datacenter, dest)
- auth = base64.encodestring('%s:%s' % (login, password))
+ auth = base64.encodestring('%s:%s' % (login, password)).rstrip()
headers = {
"Content-Type": "application/octet-stream",
"Content-Length": str(len(data)),
- "Accept": "text/plain",
"Authorization": "Basic %s" % auth,
}
@@ -147,6 +149,7 @@ def main():
else:
module.fail_json(msg='Failed to upload', status=resp.status, reason=resp.reason, length=resp.length, version=resp.version, headers=resp.getheaders(), chunked=resp.chunked, url=url)
-# this is magic, see lib/ansible/module_common.py
-#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+# Import module snippets
+from ansible.module_utils.basic import *
+
main()
diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py
index 3e42ec12..1c015a40 100644
--- a/cloud/webfaction/webfaction_app.py
+++ b/cloud/webfaction/webfaction_app.py
@@ -1,4 +1,4 @@
-#! /usr/bin/python
+#!/usr/bin/python
#
# Create a Webfaction application using Ansible and the Webfaction API
#
@@ -7,7 +7,9 @@
#
# ------------------------------------------
#
-# (c) Quentin Stafford-Fraser 2015
+# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
#
# This file is part of Ansible
#
@@ -80,6 +82,12 @@ options:
description:
- The webfaction password to use
required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+ required: false
+
'''
EXAMPLES = '''
@@ -90,6 +98,7 @@ EXAMPLES = '''
type=mod_wsgi35-python27
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
+ machine={{webfaction_machine}}
'''
import xmlrpclib
@@ -108,6 +117,7 @@ def main():
port_open = dict(required=False, choices=BOOLEANS, default=False),
login_name = dict(required=True),
login_password = dict(required=True),
+ machine = dict(required=False, default=False),
),
supports_check_mode=True
)
@@ -115,10 +125,17 @@ def main():
app_type = module.params['type']
app_state = module.params['state']
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
app_list = webfaction.list_apps(session_id)
app_map = dict([(i['name'], i) for i in app_list])
diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py
index f4204907..6c45e700 100644
--- a/cloud/webfaction/webfaction_db.py
+++ b/cloud/webfaction/webfaction_db.py
@@ -1,10 +1,12 @@
-#! /usr/bin/python
+#!/usr/bin/python
#
# Create a webfaction database using Ansible and the Webfaction API
#
# ------------------------------------------
#
-# (c) Quentin Stafford-Fraser and Andy Baker 2015
+# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
#
# This file is part of Ansible
#
@@ -68,6 +70,11 @@ options:
description:
- The webfaction password to use
required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+ required: false
'''
EXAMPLES = '''
@@ -81,6 +88,7 @@ EXAMPLES = '''
type: mysql
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
# Note that, for symmetry's sake, deleting a database using
# 'state: absent' will also delete the matching user.
@@ -103,6 +111,7 @@ def main():
password = dict(required=False, default=None),
login_name = dict(required=True),
login_password = dict(required=True),
+ machine = dict(required=False, default=False),
),
supports_check_mode=True
)
@@ -111,10 +120,17 @@ def main():
db_type = module.params['type']
db_passwd = module.params['password']
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
db_list = webfaction.list_dbs(session_id)
db_map = dict([(i['name'], i) for i in db_list])
@@ -130,7 +146,7 @@ def main():
if db_state == 'present':
- # Does an database with this name already exist?
+ # Does a database with this name already exist?
if existing_db:
# Yes, but of a different type - fail
if existing_db['db_type'] != db_type:
diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py
index 0b35faf1..c809dd6b 100644
--- a/cloud/webfaction/webfaction_domain.py
+++ b/cloud/webfaction/webfaction_domain.py
@@ -1,4 +1,4 @@
-#! /usr/bin/python
+#!/usr/bin/python
#
# Create Webfaction domains and subdomains using Ansible and the Webfaction API
#
diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py
index 7547b615..c08bd477 100644
--- a/cloud/webfaction/webfaction_mailbox.py
+++ b/cloud/webfaction/webfaction_mailbox.py
@@ -1,4 +1,4 @@
-#! /usr/bin/python
+#!/usr/bin/python
#
# Create webfaction mailbox using Ansible and the Webfaction API
#
diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py
index 57eae39c..bb1bfb94 100644
--- a/cloud/webfaction/webfaction_site.py
+++ b/cloud/webfaction/webfaction_site.py
@@ -1,4 +1,4 @@
-#! /usr/bin/python
+#!/usr/bin/python
#
# Create Webfaction website using Ansible and the Webfaction API
#
diff --git a/clustering/__init__.py b/clustering/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/clustering/__init__.py
diff --git a/clustering/consul.py b/clustering/consul.py
index 08317323..11651757 100644
--- a/clustering/consul.py
+++ b/clustering/consul.py
@@ -19,30 +19,30 @@
DOCUMENTATION = """
module: consul
-short_description: "Add, modify & delete services within a consul cluster.
- See http://consul.io for more details."
+short_description: "Add, modify & delete services within a consul cluster."
description:
- - registers services and checks for an agent with a consul cluster. A service
- is some process running on the agent node that should be advertised by
+ - Registers services and checks for an agent with a consul cluster.
+ A service is some process running on the agent node that should be advertised by
consul's discovery mechanism. It may optionally supply a check definition,
a periodic service test to notify the consul cluster of service's health.
- Checks may also be registered per node e.g. disk usage, or cpu usage and
+ - "Checks may also be registered per node e.g. disk usage, or cpu usage and
notify the health of the entire node to the cluster.
Service level checks do not require a check name or id as these are derived
- by Consul from the Service name and id respectively by appending 'service:'.
- Node level checks require a check_name and optionally a check_id.
- Currently, there is no complete way to retrieve the script, interval or ttl
+ by Consul from the Service name and id respectively by appending 'service:'.
+ Node level checks require a check_name and optionally a check_id."
+ - Currently, there is no complete way to retrieve the script, interval or ttl
metadata for a registered check. Without this metadata it is not possible to
- tell if the data supplied with ansible represents a change to a check. As a
- result this does not attempt to determine changes and will always report a
+ tell if the data supplied with ansible represents a change to a check. As a
+ result this does not attempt to determine changes and will always report a
changed occurred. An api method is planned to supply this metadata so at that
stage change management will be added.
+ - "See http://consul.io for more details."
requirements:
- "python >= 2.6"
- python-consul
- requests
version_added: "2.0"
-author: "Steve Gargan (@sgargan)"
+author: "Steve Gargan (@sgargan)"
options:
state:
description:
@@ -50,7 +50,7 @@ options:
required: true
choices: ['present', 'absent']
service_name:
- desciption:
+ description:
- Unique name for the service on a node, must be unique per node,
required if registering a service. May be ommitted if registering
a node level check
@@ -95,11 +95,11 @@ options:
interval:
description:
- the interval at which the service check will be run. This is a number
- with a s or m suffix to signify the units of seconds or minutes e.g
- 15s or 1m. If no suffix is supplied, m will be used by default e.g.
+ with a s or m suffix to signify the units of seconds or minutes e.g
+ 15s or 1m. If no suffix is supplied, m will be used by default e.g.
1 will be 1m. Required if the script param is specified.
required: false
- default: None
+ default: None
check_id:
description:
- an ID for the service check, defaults to the check name, ignored if
@@ -113,20 +113,19 @@ options:
required: false
default: None
ttl:
- description:
+ description:
- checks can be registered with a ttl instead of a script and interval
this means that the service will check in with the agent before the
- ttl expires. If it doesn't the check will be considered failed.
+ ttl expires. If it doesn't the check will be considered failed.
Required if registering a check and the script an interval are missing
- Similar to the interval this is a number with a s or m suffix to
- signify the units of seconds or minutes e.g 15s or 1m. If no suffix
+ Similar to the interval this is a number with a s or m suffix to
+ signify the units of seconds or minutes e.g 15s or 1m. If no suffix
is supplied, m will be used by default e.g. 1 will be 1m
required: false
default: None
token:
description:
- - the token key indentifying an ACL rule set. May be required to
- register services.
+ - the token key identifying an ACL rule set. May be required to register services.
required: false
default: None
"""
diff --git a/clustering/consul_kv.py b/clustering/consul_kv.py
index 2ba3a031..b0d07dda 100644
--- a/clustering/consul_kv.py
+++ b/clustering/consul_kv.py
@@ -19,14 +19,14 @@
DOCUMENTATION = """
module: consul_kv
-short_description: "manipulate entries in the key/value store of a consul
- cluster. See http://www.consul.io/docs/agent/http.html#kv for more details."
+short_description: Manipulate entries in the key/value store of a consul cluster.
description:
- - allows the addition, modification and deletion of key/value entries in a
+ - Allows the addition, modification and deletion of key/value entries in a
consul cluster via the agent. The entire contents of the record, including
- the indices, flags and session are returned as 'value'. If the key
- represents a prefix then Note that when a value is removed, the existing
+ the indices, flags and session are returned as 'value'.
+ - If the key represents a prefix, note that when a value is removed, the existing
value if any is returned as part of the results.
+ - "See http://www.consul.io/docs/agent/http.html#kv for more details."
requirements:
- "python >= 2.6"
- python-consul
diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py
index ede80049..0529abde 100644
--- a/database/misc/mongodb_user.py
+++ b/database/misc/mongodb_user.py
@@ -225,10 +225,10 @@ def main():
update_password = module.params['update_password']
try:
- if replica_set:
- client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
- else:
- client = MongoClient(login_host, int(login_port), ssl=ssl)
+ if replica_set:
+ client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
+ else:
+ client = MongoClient(login_host, int(login_port), ssl=ssl)
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
diff --git a/files/patch.py b/files/patch.py
index 60629c92..576333c3 100644
--- a/files/patch.py
+++ b/files/patch.py
@@ -70,6 +70,12 @@ options:
description:
- passes --backup --version-control=numbered to patch,
producing numbered backup copies
+ binary:
+ version_added: "2.0"
+ description:
+ - Setting to true will disable patch's heuristic for transforming CRLF
+ line endings into LF. Line endings of src and dest must match. If set to
+ False, patch will replace CRLF in src files on POSIX.
required: false
type: "bool"
default: "False"
@@ -98,10 +104,12 @@ class PatchError(Exception):
pass
-def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0):
+def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0):
opts = ['--quiet', '--reverse', '--forward', '--dry-run',
"--strip=%s" % strip, "--directory='%s'" % basedir,
"--input='%s'" % patch_file]
+ if binary:
+ opts.append('--binary')
if dest_file:
opts.append("'%s'" % dest_file)
@@ -109,12 +117,14 @@ def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0)
return rc == 0
-def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False, backup=False):
+def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False):
opts = ['--quiet', '--forward', '--batch', '--reject-file=-',
"--strip=%s" % strip, "--directory='%s'" % basedir,
"--input='%s'" % patch_file]
if dry_run:
opts.append('--dry-run')
+ if binary:
+ opts.append('--binary')
if dest_file:
opts.append("'%s'" % dest_file)
if backup:
@@ -136,7 +146,8 @@ def main():
'remote_src': {'default': False, 'type': 'bool'},
# NB: for 'backup' parameter, semantics is slightly different from standard
# since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
- 'backup': { 'default': False, 'type': 'bool' }
+ 'backup': {'default': False, 'type': 'bool'},
+ 'binary': {'default': False, 'type': 'bool'},
},
required_one_of=[['dest', 'basedir']],
supports_check_mode=True
@@ -167,9 +178,9 @@ def main():
p.src = os.path.abspath(p.src)
changed = False
- if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip):
+ if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip):
try:
- apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip,
+ apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
dry_run=module.check_mode, backup=p.backup )
changed = True
except PatchError, e:
diff --git a/monitoring/airbrake_deployment.py b/monitoring/airbrake_deployment.py
index 3b54e55e..a58df024 100644
--- a/monitoring/airbrake_deployment.py
+++ b/monitoring/airbrake_deployment.py
@@ -61,8 +61,7 @@ options:
default: 'yes'
choices: ['yes', 'no']
-# informational: requirements for nodes
-requirements: [ urllib, urllib2 ]
+requirements: []
'''
EXAMPLES = '''
@@ -72,6 +71,8 @@ EXAMPLES = '''
revision=4.2
'''
+import urllib
+
# ===========================================
# Module execution.
#
diff --git a/monitoring/librato_annotation.py b/monitoring/librato_annotation.py
index 88d3bb81..c606dfdc 100644
--- a/monitoring/librato_annotation.py
+++ b/monitoring/librato_annotation.py
@@ -31,7 +31,6 @@ description:
version_added: "1.6"
author: "Seth Edwards (@sedward)"
requirements:
- - urllib2
- base64
options:
user:
@@ -107,11 +106,7 @@ EXAMPLES = '''
'''
-try:
- import urllib2
- HAS_URLLIB2 = True
-except ImportError:
- HAS_URLLIB2 = False
+import urllib2
def post_annotation(module):
user = module.params['user']
diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py
index 832e467d..3d9bc6c0 100644
--- a/monitoring/newrelic_deployment.py
+++ b/monitoring/newrelic_deployment.py
@@ -72,8 +72,7 @@ options:
choices: ['yes', 'no']
version_added: 1.5.1
-# informational: requirements for nodes
-requirements: [ urllib, urllib2 ]
+requirements: []
'''
EXAMPLES = '''
@@ -83,6 +82,8 @@ EXAMPLES = '''
revision=1.0
'''
+import urllib
+
# ===========================================
# Module execution.
#
diff --git a/monitoring/pagerduty.py b/monitoring/pagerduty.py
index 24c622c8..b35cfbf4 100644
--- a/monitoring/pagerduty.py
+++ b/monitoring/pagerduty.py
@@ -11,6 +11,7 @@ author:
- "Andrew Newdigate (@suprememoocow)"
- "Dylan Silva (@thaumos)"
- "Justin Johns"
+ - "Bruce Pennypacker"
requirements:
- PagerDuty API access
options:
@@ -19,7 +20,7 @@ options:
- Create a maintenance window or get a list of ongoing windows.
required: true
default: null
- choices: [ "running", "started", "ongoing" ]
+ choices: [ "running", "started", "ongoing", "absent" ]
aliases: []
name:
description:
@@ -61,11 +62,11 @@ options:
version_added: '1.8'
service:
description:
- - PagerDuty service ID.
+ - A comma-separated list of PagerDuty service IDs.
required: false
default: null
choices: []
- aliases: []
+ aliases: [ services ]
hours:
description:
- Length of maintenance window in hours.
@@ -96,9 +97,6 @@ options:
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
-
-notes:
- - This module does not yet have support to end maintenance windows.
'''
EXAMPLES='''
@@ -132,6 +130,14 @@ EXAMPLES='''
service=FOO123
hours=4
desc=deployment
+ register: pd_window
+
+# Delete the previous maintenance window
+- pagerduty: name=companyabc
+ user=example@example.com
+ passwd=password123
+ state=absent
+ service={{ pd_window.result.maintenance_window.id }}
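+
+# Illustrative sketch (service IDs are placeholders): since service now accepts
+# a comma-separated list, a single window can cover several services.
+- pagerduty: name=companyabc
+ user=example@example.com
+ passwd=password123
+ state=running
+ service=FOO123,BAR456
+ hours=2
+ desc=multi-service deployment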
'''
import datetime
@@ -152,7 +158,12 @@ def ongoing(module, name, user, passwd, token):
if info['status'] != 200:
module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
- return False, response.read()
+ try:
+ json_out = json.loads(response.read())
+ except:
+ json_out = ""
+
+ return False, json_out, False
def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):
@@ -166,7 +177,8 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu
'Authorization': auth_header(user, passwd, token),
'Content-Type' : 'application/json',
}
- request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}}
+ request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}
+
if requester_id:
request_data['requester_id'] = requester_id
else:
@@ -178,19 +190,50 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu
if info['status'] != 200:
module.fail_json(msg="failed to create the window: %s" % info['msg'])
- return False, response.read()
+ try:
+ json_out = json.loads(response.read())
+ except:
+ json_out = ""
+
+ return False, json_out, True
+
+def absent(module, name, user, passwd, token, requester_id, service):
+ url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0]
+ headers = {
+ 'Authorization': auth_header(user, passwd, token),
+ 'Content-Type' : 'application/json',
+ }
+ request_data = {}
+
+ if requester_id:
+ request_data['requester_id'] = requester_id
+ else:
+ if token:
+ module.fail_json(msg="requester_id is required when using a token")
+
+ data = json.dumps(request_data)
+ response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE')
+ if info['status'] != 200:
+ module.fail_json(msg="failed to delete the window: %s" % info['msg'])
+
+ try:
+ json_out = json.loads(response.read())
+ except:
+ json_out = ""
+
+ return False, json_out, True
def main():
module = AnsibleModule(
argument_spec=dict(
- state=dict(required=True, choices=['running', 'started', 'ongoing']),
+ state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
name=dict(required=True),
user=dict(required=False),
passwd=dict(required=False),
token=dict(required=False),
- service=dict(required=False),
+ service=dict(required=False, type='list', aliases=["services"]),
requester_id=dict(required=False),
hours=dict(default='1', required=False),
minutes=dict(default='0', required=False),
@@ -217,15 +260,21 @@ def main():
if state == "running" or state == "started":
if not service:
module.fail_json(msg="service not specified")
- (rc, out) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
+ (rc, out, changed) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
+ if rc == 0:
+ changed=True
if state == "ongoing":
- (rc, out) = ongoing(module, name, user, passwd, token)
+ (rc, out, changed) = ongoing(module, name, user, passwd, token)
+
+ if state == "absent":
+ (rc, out, changed) = absent(module, name, user, passwd, token, requester_id, service)
if rc != 0:
module.fail_json(msg="failed", result=out)
- module.exit_json(msg="success", result=out)
+
+ module.exit_json(msg="success", result=out, changed=changed)
# import module snippets
from ansible.module_utils.basic import *
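
The new absent state above issues a DELETE against the v1 maintenance_windows endpoint and reports changed accordingly. As a rough, hypothetical sketch of the same call outside Ansible (the requests library, subdomain, window ID and auth header value are assumptions, not part of the module):

import requests

def delete_maintenance_window(subdomain, auth_header_value, window_id):
    # Mirrors the module's absent() path: DELETE the window, expect HTTP 200.
    url = "https://%s.pagerduty.com/api/v1/maintenance_windows/%s" % (subdomain, window_id)
    headers = {'Authorization': auth_header_value, 'Content-Type': 'application/json'}
    resp = requests.delete(url, headers=headers)
    if resp.status_code != 200:
        raise RuntimeError("failed to delete the window: %s" % resp.status_code)
    return resp

# Example call with placeholder values:
# delete_maintenance_window('companyabc', 'Token token=XXXX', 'PXXXXXX')
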
diff --git a/monitoring/rollbar_deployment.py b/monitoring/rollbar_deployment.py
index 43e2aa00..060193b7 100644
--- a/monitoring/rollbar_deployment.py
+++ b/monitoring/rollbar_deployment.py
@@ -76,6 +76,7 @@ EXAMPLES = '''
comment='Test Deploy'
'''
+import urllib
def main():
diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py
new file mode 100644
index 00000000..a1bd36ca
--- /dev/null
+++ b/monitoring/sensu_check.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: sensu_check
+short_description: Manage Sensu checks
+version_added: 2.0
+description:
+ - Manage the checks that should be run on a machine by I(Sensu).
+ - Most options do not have a default and will not be added to the check definition unless specified.
+  - All defaults except I(path), I(state), I(backup) and I(metric) are not enforced by this module;
+  - they are simply documented here for your convenience.
+options:
+ name:
+ description:
+ - The name of the check
+ - This is the key that is used to determine whether a check exists
+ required: true
+ state:
+ description: Whether the check should be present or not
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ description:
+ - Path to the json file of the check to be added/removed.
+ - Will be created if it does not exist (unless I(state=absent)).
+ - The parent folders need to exist when I(state=present), otherwise an error will be thrown
+ required: false
+ default: /etc/sensu/conf.d/checks.json
+ backup:
+ description:
+ - Create a backup file (if yes), including the timestamp information so
+ - you can get the original file back if you somehow clobbered it incorrectly.
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ command:
+ description:
+ - Path to the sensu check to run (not required when I(state=absent))
+ required: true
+ handlers:
+ description:
+ - List of handlers to notify when the check fails
+ required: false
+ default: []
+ subscribers:
+ description:
+ - List of subscribers/channels this check should run for
+ - See sensu_subscribers to subscribe a machine to a channel
+ required: false
+ default: []
+ interval:
+ description:
+ - Check interval in seconds
+ required: false
+ default: null
+ timeout:
+ description:
+ - Timeout for the check
+ required: false
+ default: 10
+ handle:
+ description:
+ - Whether the check should be handled or not
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: yes
+ subdue_begin:
+ description:
+ - When to disable handling of check failures
+ required: false
+ default: null
+ subdue_end:
+ description:
+ - When to enable handling of check failures
+ required: false
+ default: null
+ dependencies:
+ description:
+ - Other checks this check depends on, if dependencies fail,
+ - handling of this check will be disabled
+ required: false
+ default: []
+ metric:
+ description: Whether the check is a metric
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ standalone:
+ description:
+ - Whether the check should be scheduled by the sensu client or server
+ - This option obviates the need for specifying the I(subscribers) option
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ publish:
+ description:
+ - Whether the check should be scheduled at all.
+ - You can still issue it via the sensu api
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: yes
+ occurrences:
+ description:
+ - Number of event occurrences before the handler should take action
+ required: false
+ default: 1
+ refresh:
+ description:
+ - Number of seconds handlers should wait before taking second action
+ required: false
+ default: null
+ aggregate:
+ description:
+ - Classifies the check as an aggregate check,
+ - making it available via the aggregate API
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+ low_flap_threshold:
+ description:
+      - The low threshold for flap detection
+ required: false
+ default: null
+ high_flap_threshold:
+ description:
+      - The high threshold for flap detection
+ required: false
+ default: null
+requirements: [ ]
+author: Anders Ingemann
+'''
+
+EXAMPLES = '''
+# Fetch metrics about the CPU load every 60 seconds,
+# the sensu server has a handler called 'relay' which forwards stats to graphite
+- name: get cpu metrics
+ sensu_check: name=cpu_load
+ command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+ metric=yes handlers=relay subscribers=common interval=60
+
+# Check whether nginx is running
+- name: check nginx process
+ sensu_check: name=nginx_running
+ command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid'
+ handlers=default subscribers=nginx interval=60
+
+# Stop monitoring the disk capacity.
+# Note that the check will still show up in the sensu dashboard,
+# to remove it completely you need to issue a DELETE request to the sensu api.
+- name: check disk
+ sensu_check: name=check_disk_capacity
+'''
+
+
+def sensu_check(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ try:
+ import json
+ except ImportError:
+ import simplejson as json
+
+ try:
+ try:
+ stream = open(path, 'r')
+            config = json.load(stream)
+ except IOError, e:
+            if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=str(e))
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+ finally:
+ if stream:
+ stream.close()
+
+ if 'checks' not in config:
+ if state == 'absent':
+ reasons.append('`checks\' section did not exist and state is `absent\'')
+ return changed, reasons
+ config['checks'] = {}
+ changed = True
+ reasons.append('`checks\' section did not exist')
+
+ if state == 'absent':
+ if name in config['checks']:
+ del config['checks'][name]
+ changed = True
+ reasons.append('check was present and state is `absent\'')
+
+ if state == 'present':
+ if name not in config['checks']:
+ check = {}
+ config['checks'][name] = check
+ changed = True
+ reasons.append('check was absent and state is `present\'')
+ else:
+ check = config['checks'][name]
+ simple_opts = ['command',
+ 'handlers',
+ 'subscribers',
+ 'interval',
+ 'timeout',
+ 'handle',
+ 'dependencies',
+ 'standalone',
+ 'publish',
+ 'occurrences',
+ 'refresh',
+ 'aggregate',
+ 'low_flap_threshold',
+ 'high_flap_threshold',
+ ]
+ for opt in simple_opts:
+ if module.params[opt] is not None:
+ if opt not in check or check[opt] != module.params[opt]:
+ check[opt] = module.params[opt]
+ changed = True
+ reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
+ else:
+ if opt in check:
+ del check[opt]
+ changed = True
+ reasons.append('`{opt}\' was removed'.format(opt=opt))
+
+ if module.params['metric']:
+ if 'type' not in check or check['type'] != 'metric':
+ check['type'] = 'metric'
+ changed = True
+ reasons.append('`type\' was not defined or not `metric\'')
+ if not module.params['metric'] and 'type' in check:
+ del check['type']
+ changed = True
+ reasons.append('`type\' was defined')
+
+ if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
+ subdue = {'begin': module.params['subdue_begin'],
+ 'end': module.params['subdue_end'],
+ }
+ if 'subdue' not in check or check['subdue'] != subdue:
+ check['subdue'] = subdue
+ changed = True
+ reasons.append('`subdue\' did not exist or was different')
+ else:
+ if 'subdue' in check:
+ del check['subdue']
+ changed = True
+ reasons.append('`subdue\' was removed')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ try:
+ stream = open(path, 'w')
+ stream.write(json.dumps(config, indent=2) + '\n')
+ except IOError, e:
+ module.fail_json(msg=str(e))
+ finally:
+ if stream:
+ stream.close()
+
+ return changed, reasons
+
+
+def main():
+
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ 'command': {'type': 'str'},
+ 'handlers': {'type': 'list'},
+ 'subscribers': {'type': 'list'},
+ 'interval': {'type': 'int'},
+ 'timeout': {'type': 'int'},
+ 'handle': {'type': 'bool'},
+ 'subdue_begin': {'type': 'str'},
+ 'subdue_end': {'type': 'str'},
+ 'dependencies': {'type': 'list'},
+ 'metric': {'type': 'bool', 'default': 'no'},
+ 'standalone': {'type': 'bool'},
+ 'publish': {'type': 'bool'},
+ 'occurrences': {'type': 'int'},
+ 'refresh': {'type': 'int'},
+ 'aggregate': {'type': 'bool'},
+ 'low_flap_threshold': {'type': 'int'},
+ 'high_flap_threshold': {'type': 'int'},
+ }
+
+ required_together = [['subdue_begin', 'subdue_end']]
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+ if module.params['state'] != 'absent' and module.params['command'] is None:
+ module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_check(module, path, name, state, backup)
+
+ module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
+
+from ansible.module_utils.basic import *
+main()
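
sensu_check edits a single JSON file keyed by check name, so the result of the nginx example above is easy to picture. A minimal sketch of what the module would write (layout inferred from the merge logic in sensu_check(); values taken from the example):

import json

# What /etc/sensu/conf.d/checks.json would contain after the nginx example runs.
config = {
    'checks': {
        'nginx_running': {
            'command': '/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid',
            'handlers': ['default'],
            'subscribers': ['nginx'],
            'interval': 60,
        }
    }
}
print(json.dumps(config, indent=2))
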
diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py
index 772e92cb..6fac82c7 100644
--- a/monitoring/zabbix_host.py
+++ b/monitoring/zabbix_host.py
@@ -79,6 +79,10 @@ options:
description:
- The timeout of API request (seconds).
default: 10
+ proxy:
+ description:
+ - The name of the Zabbix Proxy to be used
+ default: None
interfaces:
description:
- List of interfaces to be created for the host (see example below).
@@ -118,6 +122,7 @@ EXAMPLES = '''
ip: 10.xx.xx.xx
dns: ""
port: 12345
+ proxy: a.zabbix.proxy
'''
import logging
@@ -174,21 +179,25 @@ class Host(object):
template_ids.append(template_id)
return template_ids
- def add_host(self, host_name, group_ids, status, interfaces):
+ def add_host(self, host_name, group_ids, status, interfaces, proxy_id):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
- host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status})
+ parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
+ if proxy_id:
+ parameters['proxy_hostid'] = proxy_id
+ host_list = self._zapi.host.create(parameters)
if len(host_list) >= 1:
return host_list['hostids'][0]
except Exception, e:
self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
- def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list):
+ def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
- self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status})
+ parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'proxy_hostid': proxy_id}
+ self._zapi.host.update(parameters)
interface_list_copy = exist_interface_list
if interfaces:
for interface in interfaces:
@@ -234,6 +243,14 @@ class Host(object):
else:
return host_list[0]
+ # get proxyid by proxy name
+ def get_proxyid_by_proxy_name(self, proxy_name):
+ proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
+ if len(proxy_list) < 1:
+ self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
+ else:
+ return proxy_list[0]['proxyid']
+
# get group ids by group names
def get_group_ids_by_group_names(self, group_names):
group_ids = []
@@ -294,7 +311,7 @@ class Host(object):
# check all the properties before link or clear template
def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
- exist_interfaces, host):
+ exist_interfaces, host, proxy_id):
# get the existing host's groups
exist_host_groups = self.get_host_groups_by_host_id(host_id)
if set(host_groups) != set(exist_host_groups):
@@ -314,6 +331,9 @@ class Host(object):
if set(list(template_ids)) != set(exist_template_ids):
return True
+ if host['proxy_hostid'] != proxy_id:
+ return True
+
return False
# link or clear template of the host
@@ -349,7 +369,8 @@ def main():
status=dict(default="enabled", choices=['enabled', 'disabled']),
state=dict(default="present", choices=['present', 'absent']),
timeout=dict(type='int', default=10),
- interfaces=dict(required=False)
+ interfaces=dict(required=False),
+ proxy=dict(required=False)
),
supports_check_mode=True
)
@@ -367,6 +388,7 @@ def main():
state = module.params['state']
timeout = module.params['timeout']
interfaces = module.params['interfaces']
+ proxy = module.params['proxy']
# convert enabled to 0; disabled to 1
status = 1 if status == "disabled" else 0
@@ -396,6 +418,11 @@ def main():
if interface['type'] == 1:
ip = interface['ip']
+ proxy_id = "0"
+
+ if proxy:
+ proxy_id = host.get_proxyid_by_proxy_name(proxy)
+
# check if host exist
is_host_exist = host.is_host_exist(host_name)
@@ -421,10 +448,10 @@ def main():
if len(exist_interfaces) > interfaces_len:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
- exist_interfaces, zabbix_host_obj):
+ exist_interfaces, zabbix_host_obj, proxy_id):
host.link_or_clear_template(host_id, template_ids)
host.update_host(host_name, group_ids, status, host_id,
- interfaces, exist_interfaces)
+ interfaces, exist_interfaces, proxy_id)
module.exit_json(changed=True,
result="Successfully update host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
@@ -432,8 +459,8 @@ def main():
module.exit_json(changed=False)
else:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
- exist_interfaces_copy, zabbix_host_obj):
- host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces)
+ exist_interfaces_copy, zabbix_host_obj, proxy_id):
+ host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id)
host.link_or_clear_template(host_id, template_ids)
module.exit_json(changed=True,
result="Successfully update host %s (%s) and linked with template '%s'"
@@ -448,7 +475,7 @@ def main():
module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
# create host
- host_id = host.add_host(host_name, group_ids, status, interfaces)
+ host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id)
host.link_or_clear_template(host_id, template_ids)
module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
host_name, ip, link_templates))
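
The proxy support above hinges on resolving a proxy name to its numeric ID before host.create/host.update. A hypothetical standalone version of that lookup (assumes the zabbix-api package this module already depends on; server URL, credentials and proxy name are placeholders):

from zabbix_api import ZabbixAPI

zapi = ZabbixAPI(server='https://zabbix.example.com/zabbix')
zapi.login('apiuser', 'apipass')

# Same filter as get_proxyid_by_proxy_name(); an empty result means "no such proxy".
proxies = zapi.proxy.get({'output': 'extend', 'filter': {'host': ['a.zabbix.proxy']}})
proxy_id = proxies[0]['proxyid'] if proxies else "0"   # "0" means "no proxy", as in main()
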
diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py
index 61bc3535..384a625b 100644
--- a/network/citrix/netscaler.py
+++ b/network/citrix/netscaler.py
@@ -81,7 +81,7 @@ options:
default: 'yes'
choices: ['yes', 'no']
-requirements: [ "urllib", "urllib2" ]
+requirements: []
author: "Nandor Sivok (@dominis)"
'''
@@ -99,7 +99,7 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api
import base64
import socket
-
+import urllib
class netscaler(object):
diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py
index fcc7232a..cce7bd10 100644
--- a/network/dnsmadeeasy.py
+++ b/network/dnsmadeeasy.py
@@ -86,7 +86,7 @@ notes:
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks.
-requirements: [ urllib, urllib2, hashlib, hmac ]
+requirements: [ hashlib, hmac ]
author: "Brice Burgess (@briceburg)"
'''
@@ -113,6 +113,8 @@ EXAMPLES = '''
# DNSMadeEasy module specific support methods.
#
+import urllib
+
IMPORT_ERROR = None
try:
import json
diff --git a/network/haproxy.py b/network/haproxy.py
index 690aa60b..6d4f6a42 100644
--- a/network/haproxy.py
+++ b/network/haproxy.py
@@ -78,13 +78,13 @@ options:
description:
- number of times to check for status after changing the state
required: false
- default: 20
+ default: 25
version_added: "2.0"
wait_interval:
description:
- number of seconds to wait between retries
required: false
- default: 1
+ default: 5
version_added: "2.0"
'''
@@ -129,8 +129,8 @@ import time
DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock"
RECV_SIZE = 1024
ACTION_CHOICES = ['enabled', 'disabled']
-WAIT_RETRIES=20
-WAIT_INTERVAL=1
+WAIT_RETRIES=25
+WAIT_INTERVAL=5
######################################################################
class TimeoutException(Exception):
@@ -302,9 +302,9 @@ def main():
weight=dict(required=False, default=None),
socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION),
shutdown_sessions=dict(required=False, default=False),
- wait=dict(required=False, default=False),
- wait_retries=dict(required=False, default=WAIT_RETRIES),
- wait_interval=dict(required=False, default=WAIT_INTERVAL),
+ wait=dict(required=False, default=False, type='bool'),
+ wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'),
+ wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'),
),
)
diff --git a/notification/flowdock.py b/notification/flowdock.py
index 7c42e586..34dad8db 100644
--- a/notification/flowdock.py
+++ b/notification/flowdock.py
@@ -85,8 +85,7 @@ options:
choices: ['yes', 'no']
version_added: 1.5.1
-# informational: requirements for nodes
-requirements: [ urllib, urllib2 ]
+requirements: [ ]
'''
EXAMPLES = '''
@@ -104,6 +103,8 @@ EXAMPLES = '''
tags=tag1,tag2,tag3
'''
+import urllib
+
# ===========================================
# Module execution.
#
diff --git a/notification/grove.py b/notification/grove.py
index 85601d1c..4e4a0b5b 100644
--- a/notification/grove.py
+++ b/notification/grove.py
@@ -49,6 +49,8 @@ EXAMPLES = '''
message=deployed {{ target }}
'''
+import urllib
+
BASE_URL = 'https://grove.io/api/notice/%s/'
# ==============================================================
diff --git a/notification/hall.py b/notification/hall.py
index 7c76e523..05c1a981 100755
--- a/notification/hall.py
+++ b/notification/hall.py
@@ -18,18 +18,18 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
+
DOCUMENTATION = """
module: hall
short_description: Send notification to Hall
description:
- - The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms.
-version_added: 1.6
-author: Billy Kimble <basslines@gmail.com>
+  - "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notification messages to rooms."
+version_added: "2.0"
+author: Billy Kimble (@bkimble) <basslines@gmail.com>
options:
room_token:
description:
- - Room token provided to you by setting up the Ansible room integation on U(https://hall.com)
+      - "Room token provided to you by setting up the Ansible room integration on U(https://hall.com)"
required: true
msg:
description:
@@ -41,12 +41,12 @@ options:
required: true
picture:
description:
- - The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)
+ - "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)"
required: false
-"""
+"""
EXAMPLES = """
-- name: Send Hall notifiation
+- name: Send Hall notification
local_action:
module: hall
room_token: <hall room integration token>
@@ -57,7 +57,7 @@ EXAMPLES = """
when: ec2.instances|length > 0
local_action:
module: hall
- room_token: <hall room integration token>
+ room_token: <hall room integration token>
title: Server Creation
msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region."
with_items: ec2.instances
@@ -66,7 +66,7 @@ EXAMPLES = """
HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
def send_request_to_hall(module, room_token, payload):
- headers = {'Content-Type': 'application/json'}
+ headers = {'Content-Type': 'application/json'}
payload=module.jsonify(payload)
api_endpoint = HALL_API_ENDPOINT % (room_token)
response, info = fetch_url(module, api_endpoint, data=payload, headers=headers)
@@ -83,7 +83,7 @@ def main():
picture = dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
)
)
-
+
room_token = module.params['room_token']
message = module.params['msg']
title = module.params['title']
diff --git a/notification/hipchat.py b/notification/hipchat.py
index 2498c118..57e97eae 100644
--- a/notification/hipchat.py
+++ b/notification/hipchat.py
@@ -5,7 +5,7 @@ DOCUMENTATION = '''
---
module: hipchat
version_added: "1.2"
-short_description: Send a message to hipchat
+short_description: Send a message to hipchat.
description:
- Send a message to hipchat
options:
@@ -56,30 +56,38 @@ options:
version_added: 1.5.1
api:
description:
- - API url if using a self-hosted hipchat server
+      - API URL if using a self-hosted hipchat server. For hipchat API version 2, use the C(/v2) path in the URI.
required: false
default: 'https://api.hipchat.com/v1'
version_added: 1.6.0
-# informational: requirements for nodes
-requirements: [ urllib, urllib2 ]
+requirements: [ ]
author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)"
'''
EXAMPLES = '''
-- hipchat: token=AAAAAA room=notify msg="Ansible task finished"
+- hipchat: room=notify msg="Ansible task finished"
+
+# Use Hipchat API version 2
+
+- hipchat:
+ api: "https://api.hipchat.com/v2/"
+ token: OAUTH2_TOKEN
+ room: notify
+ msg: "Ansible task finished"
'''
# ===========================================
# HipChat module specific support methods.
#
+import urllib
+
DEFAULT_URI = "https://api.hipchat.com/v1"
MSG_URI_V1 = "/rooms/message"
-MSG_URI_V2 = "/room/{id_or_name}/message"
NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
@@ -94,12 +102,8 @@ def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
params['message_format'] = msg_format
params['color'] = color
params['api'] = api
-
- if notify:
- params['notify'] = 1
- else:
- params['notify'] = 0
-
+ params['notify'] = int(notify)
+
url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
data = urllib.urlencode(params)
@@ -115,7 +119,7 @@ def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
- color='yellow', notify=False, api=MSG_URI_V2):
+ color='yellow', notify=False, api=NOTIFY_URI_V2):
'''sending message to hipchat v2 server'''
print "Sending message to v2 server"
@@ -125,13 +129,11 @@ def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
body['message'] = msg
body['color'] = color
body['message_format'] = msg_format
+    body['notify'] = notify
- if notify:
- POST_URL = api + NOTIFY_URI_V2
- else:
- POST_URL = api + MSG_URI_V2
-
- url = POST_URL.replace('{id_or_name}',room)
+ POST_URL = api + NOTIFY_URI_V2
+
+ url = POST_URL.replace('{id_or_name}', room)
data = json.dumps(body)
if module.check_mode:
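
After this change every v2 message goes to the room notification endpoint. A rough equivalent of that request outside the module (hypothetical: uses the requests library instead of fetch_url, and assumes Bearer-token auth for the v2 API; token and room are placeholders):

import json
import requests

room = 'notify'
url = 'https://api.hipchat.com/v2' + '/room/{id_or_name}/notification'.replace('{id_or_name}', room)
body = {'message': 'Ansible task finished', 'color': 'yellow',
        'message_format': 'text', 'notify': False}
resp = requests.post(url,
                     data=json.dumps(body),
                     headers={'Content-Type': 'application/json',
                              'Authorization': 'Bearer OAUTH2_TOKEN'})
print(resp.status_code)   # an empty 2xx response indicates success
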
diff --git a/notification/irc.py b/notification/irc.py
index e6852c85..7e34049c 100644
--- a/notification/irc.py
+++ b/notification/irc.py
@@ -39,7 +39,7 @@ options:
default: 6667
nick:
description:
- - Nickname. May be shortened, depending on server's NICKLEN setting.
+ - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
required: false
default: ansible
msg:
@@ -52,7 +52,7 @@ options:
- Set the channel topic
required: false
default: null
- version_added: 2.0
+ version_added: "2.0"
color:
description:
- Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none").
@@ -61,13 +61,19 @@ options:
choices: [ "none", "yellow", "red", "green", "blue", "black" ]
channel:
description:
- - Channel name
+ - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
required: true
+ nick_to:
+ description:
+ - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
+ required: false
+ default: null
+ version_added: "2.0"
key:
description:
- Channel key
required: false
- version_added: 1.7
+ version_added: "1.7"
passwd:
description:
- Server password
@@ -77,12 +83,12 @@ options:
- Timeout to use while waiting for successful registration and join
messages, this is to prevent an endless loop
default: 30
- version_added: 1.5
+ version_added: "1.5"
use_ssl:
description:
- Designates whether TLS/SSL should be used when connecting to the IRC server
default: False
- version_added: 1.8
+ version_added: "1.8"
# informational: requirements for nodes
requirements: [ socket ]
@@ -95,10 +101,19 @@ EXAMPLES = '''
- irc: server=irc.example.net channel="#t1" msg="Hello world"
- local_action: irc port=6669
+ server="irc.example.net"
channel="#t1"
msg="All finished at {{ ansible_date_time.iso8601 }}"
color=red
nick=ansibleIRC
+
+- local_action: irc port=6669
+ server="irc.example.net"
+ channel="#t1"
+ nick_to=["nick1", "nick2"]
+ msg="All finished at {{ ansible_date_time.iso8601 }}"
+ color=red
+ nick=ansibleIRC
'''
# ===========================================
@@ -112,7 +127,7 @@ import ssl
from time import sleep
-def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None,
+def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=[], key=None, topic=None,
nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False):
'''send message to IRC'''
@@ -173,7 +188,11 @@ def send_msg(channel, msg, server='localhost', port='6667', key=None, topic=None
irc.send('TOPIC %s :%s\r\n' % (channel, topic))
sleep(1)
- irc.send('PRIVMSG %s :%s\r\n' % (channel, message))
+ if nick_to:
+ for nick in nick_to:
+ irc.send('PRIVMSG %s :%s\r\n' % (nick, message))
+ if channel:
+ irc.send('PRIVMSG %s :%s\r\n' % (channel, message))
sleep(1)
irc.send('PART %s\r\n' % channel)
irc.send('QUIT\r\n')
@@ -191,33 +210,38 @@ def main():
server=dict(default='localhost'),
port=dict(default=6667),
nick=dict(default='ansible'),
+ nick_to=dict(required=False, type='list'),
msg=dict(required=True),
color=dict(default="none", choices=["yellow", "red", "green",
"blue", "black", "none"]),
- channel=dict(required=True),
+ channel=dict(required=False),
key=dict(),
topic=dict(),
passwd=dict(),
timeout=dict(type='int', default=30),
use_ssl=dict(type='bool', default=False)
),
- supports_check_mode=True
+ supports_check_mode=True,
+ required_one_of=[['channel', 'nick_to']]
)
server = module.params["server"]
port = module.params["port"]
nick = module.params["nick"]
- topic = module.params["topic"]
+ nick_to = module.params["nick_to"]
msg = module.params["msg"]
color = module.params["color"]
channel = module.params["channel"]
+ topic = module.params["topic"]
+ if topic and not channel:
+ module.fail_json(msg="When topic is specified, a channel is required.")
key = module.params["key"]
passwd = module.params["passwd"]
timeout = module.params["timeout"]
use_ssl = module.params["use_ssl"]
try:
- send_msg(channel, msg, server, port, key, topic, nick, color, passwd, timeout, use_ssl)
+ send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl)
except Exception, e:
module.fail_json(msg="unable to send to IRC: %s" % e)
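
The nick_to change boils down to emitting one PRIVMSG per target instead of a single channel message. Purely as an illustration (example nicks, channel and timestamp):

# Raw IRC lines send_msg() now produces for nick_to=["nick1", "nick2"] and channel="#t1".
nick_to = ['nick1', 'nick2']
channel = '#t1'
message = 'All finished at 2015-07-20T15:17:56Z'  # example payload
lines = ['PRIVMSG %s :%s\r\n' % (nick, message) for nick in nick_to]
if channel:
    lines.append('PRIVMSG %s :%s\r\n' % (channel, message))
for line in lines:
    print(repr(line))
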
diff --git a/notification/mail.py b/notification/mail.py
index c42e80fd..8be9a589 100644
--- a/notification/mail.py
+++ b/notification/mail.py
@@ -110,6 +110,12 @@ options:
- The character set of email being sent
default: 'us-ascii'
required: false
+ subtype:
+ description:
+ - The minor mime type, can be either text or html. The major type is always text.
+ default: 'plain'
+ required: false
+ version_added: "2.0"
"""
EXAMPLES = '''
@@ -183,7 +189,8 @@ def main():
body = dict(default=None),
attach = dict(default=None),
headers = dict(default=None),
- charset = dict(default='us-ascii')
+ charset = dict(default='us-ascii'),
+ subtype = dict(default='plain')
)
)
@@ -200,6 +207,7 @@ def main():
attach_files = module.params.get('attach')
headers = module.params.get('headers')
charset = module.params.get('charset')
+ subtype = module.params.get('subtype')
sender_phrase, sender_addr = parseaddr(sender)
if not body:
@@ -259,7 +267,7 @@ def main():
if len(cc_list) > 0:
msg['Cc'] = ", ".join(cc_list)
- part = MIMEText(body + "\n\n", _charset=charset)
+ part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
msg.attach(part)
if attach_files is not None:
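
The new subtype option is passed straight through to MIMEText, which is what switches the body between text/plain and text/html. A minimal standard-library sketch (the body text is an example):

from email.mime.text import MIMEText

part = MIMEText('<h1>Deployed build 4.2</h1>\n\n', _subtype='html', _charset='us-ascii')
print(part['Content-Type'])   # text/html; charset="us-ascii"
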
diff --git a/notification/nexmo.py b/notification/nexmo.py
index d0c3d05e..89a246c0 100644
--- a/notification/nexmo.py
+++ b/notification/nexmo.py
@@ -71,6 +71,7 @@ EXAMPLES = """
msg: "{{ inventory_hostname }} completed"
"""
+import urllib
NEXMO_API = 'https://rest.nexmo.com/sms/json'
diff --git a/notification/sendgrid.py b/notification/sendgrid.py
index 78806687..e1ae7b77 100644
--- a/notification/sendgrid.py
+++ b/notification/sendgrid.py
@@ -84,10 +84,8 @@ EXAMPLES = '''
# =======================================
# sendgrid module support methods
#
-try:
- import urllib, urllib2
-except ImportError:
- module.fail_json(msg="urllib and urllib2 are required")
+import urllib
+import urllib2
import base64
diff --git a/notification/slack.py b/notification/slack.py
index baabe4f5..ba4ed2e4 100644
--- a/notification/slack.py
+++ b/notification/slack.py
@@ -177,7 +177,7 @@ def main():
module = AnsibleModule(
argument_spec = dict(
domain = dict(type='str', required=False, default=None),
- token = dict(type='str', required=True),
+ token = dict(type='str', required=True, no_log=True),
msg = dict(type='str', required=True),
channel = dict(type='str', default=None),
username = dict(type='str', default='Ansible'),
diff --git a/notification/twilio.py b/notification/twilio.py
index e9ec5bcf..ee12d987 100644
--- a/notification/twilio.py
+++ b/notification/twilio.py
@@ -104,10 +104,8 @@ EXAMPLES = '''
# =======================================
# twilio module support methods
#
-try:
- import urllib, urllib2
-except ImportError:
- module.fail_json(msg="urllib and urllib2 are required")
+import urllib
+import urllib2
import base64
diff --git a/notification/typetalk.py b/notification/typetalk.py
index 638f97ae..002c8b5c 100644
--- a/notification/typetalk.py
+++ b/notification/typetalk.py
@@ -25,7 +25,7 @@ options:
description:
- message body
required: true
-requirements: [ urllib, urllib2, json ]
+requirements: [ json ]
author: "Takashi Someda (@tksmd)"
'''
@@ -33,15 +33,9 @@ EXAMPLES = '''
- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed"
'''
-try:
- import urllib
-except ImportError:
- urllib = None
+import urllib
-try:
- import urllib2
-except ImportError:
- urllib2 = None
+import urllib2
try:
import json
@@ -96,8 +90,8 @@ def main():
supports_check_mode=False
)
- if not (urllib and urllib2 and json):
- module.fail_json(msg="urllib, urllib2 and json modules are required")
+ if not json:
+ module.fail_json(msg="json module is required")
client_id = module.params["client_id"]
client_secret = module.params["client_secret"]
diff --git a/packaging/dpkg_selections b/packaging/dpkg_selections.py
index f09ff9a9..f09ff9a9 100644
--- a/packaging/dpkg_selections
+++ b/packaging/dpkg_selections.py
diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py
new file mode 100644
index 00000000..7b092a13
--- /dev/null
+++ b/packaging/elasticsearch_plugin.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+
+"""
+Ansible module to manage elasticsearch plugins
+(c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: elasticsearch_plugin
+short_description: Manage Elasticsearch plugins
+description:
+ - Manages Elasticsearch plugins.
+version_added: "2.0"
+author: Mathew Davies (@ThePixelDeveloper)
+options:
+ name:
+ description:
+ - Name of the plugin to install
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ required: False
+ choices: [present, absent]
+ default: present
+ url:
+ description:
+ - Set exact URL to download the plugin from
+ required: False
+ default: None
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h..."
+ required: False
+ default: 1m
+ plugin_bin:
+ description:
+ - Location of the plugin binary
+ required: False
+ default: /usr/share/elasticsearch/bin/plugin
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Elasticsearch
+ required: False
+ default: /usr/share/elasticsearch/plugins/
+ version:
+ description:
+ - Version of the plugin to be installed.
+ If plugin exists with previous version, it will NOT be updated
+ required: False
+ default: None
+'''
+
+EXAMPLES = '''
+# Install Elasticsearch head plugin
+- elasticsearch_plugin: state=present name="mobz/elasticsearch-head"
+
+# Install specific version of a plugin
+- elasticsearch_plugin: state=present name="com.github.kzwang/elasticsearch-image" version="1.2.0"
+
+# Uninstall Elasticsearch head plugin
+- elasticsearch_plugin: state=absent name="mobz/elasticsearch-head"
+'''
+
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for string in ("elasticsearch-", "es-"):
+ if repo.startswith(string):
+ return repo[len(string):]
+
+ return repo
+
+
+def is_plugin_present(plugin_dir, working_dir):
+ return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+
+def parse_error(string):
+ reason = "reason: "
+ return string[string.index(reason) + len(reason):].strip()
+
+
+def main():
+
+ package_state_map = dict(
+ present="--install",
+ absent="--remove"
+ )
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=package_state_map.keys()),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin"),
+ plugin_dir=dict(default="/usr/share/elasticsearch/plugins/"),
+ version=dict(default=None)
+ )
+ )
+
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ version = module.params["version"]
+
+ present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name)
+
+ if (version):
+ name = name + '/' + version
+
+ cmd_args = [plugin_bin, package_state_map[state], name]
+
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ cmd = " ".join(cmd_args)
+
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ module.exit_json(changed=True, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+from ansible.module_utils.basic import *
+
+main()
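
parse_plugin_repo() is what lets is_plugin_present() map a plugin name onto its on-disk directory. Below is a standalone copy of that normalisation, exercised with a few typical names (the example names are illustrative):

def parse_plugin_repo(string):
    # Accept "pluginname" or "username/pluginname", then strip the
    # conventional "elasticsearch-" / "es-" prefixes.
    elements = string.split("/")
    repo = elements[0]
    if len(elements) > 1:
        repo = elements[1]
    for prefix in ("elasticsearch-", "es-"):
        if repo.startswith(prefix):
            return repo[len(prefix):]
    return repo

print(parse_plugin_repo('mobz/elasticsearch-head'))                # head
print(parse_plugin_repo('com.github.kzwang/elasticsearch-image'))  # image
print(parse_plugin_repo('es-reindex'))                             # reindex
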
diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py
new file mode 100644
index 00000000..f4aeff41
--- /dev/null
+++ b/packaging/language/bundler.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION='''
+---
+module: bundler
+short_description: Manage Ruby Gem dependencies with Bundler
+description:
+ - Manage installation and Gem version dependencies for Ruby using the Bundler gem
+version_added: "2.0.0"
+options:
+ executable:
+ description:
+ - The path to the bundler executable
+ required: false
+ default: null
+ state:
+ description:
+      - The desired state of the Gem bundle. C(latest) updates gems to the most recent acceptable version
+ required: false
+ choices: [present, latest]
+ default: present
+ chdir:
+ description:
+      - The directory to execute the bundler commands from. This directory
+ needs to contain a valid Gemfile or .bundle/ directory
+ required: false
+ default: temporary working directory
+ exclude_groups:
+ description:
+ - A list of Gemfile groups to exclude during operations. This only
+ applies when state is C(present). Bundler considers this
+ a 'remembered' property for the Gemfile and will automatically exclude
+ groups in future operations even if C(exclude_groups) is not set
+ required: false
+ default: null
+ clean:
+ description:
+ - Only applies if state is C(present). If set removes any gems on the
+ target host that are not in the gemfile
+ required: false
+ choices: [yes, no]
+ default: "no"
+ gemfile:
+ description:
+ - Only applies if state is C(present). The path to the gemfile to use to install gems.
+ required: false
+ default: Gemfile in current directory
+ local:
+ description:
+ - If set only installs gems from the cache on the target host
+ required: false
+ choices: [yes, no]
+ default: "no"
+ deployment_mode:
+ description:
+ - Only applies if state is C(present). If set it will only install gems
+ that are in the default or production groups. Requires a Gemfile.lock
+ file to have been created prior
+ required: false
+ choices: [yes, no]
+ default: "no"
+ user_install:
+ description:
+      - Only applies if state is C(present). Installs gems into the local user's cache when C(yes), or system-wide for all users when C(no)
+ required: false
+ choices: [yes, no]
+ default: "yes"
+ gem_path:
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install the gems into. If C(chdir) is set then this path is relative to
+ C(chdir)
+ required: false
+ default: RubyGems gem paths
+ binstub_directory:
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install any gem bins files to. When executed the bin files will run
+ within the context of the Gemfile and fail if any required gem
+ dependencies are not installed. If C(chdir) is set then this path is
+ relative to C(chdir)
+ required: false
+ default: null
+ extra_args:
+ description:
+ - A space separated string of additional commands that can be applied to
+ the Bundler command. Refer to the Bundler documentation for more
+ information
+ required: false
+ default: null
+author: "Tim Hoiberg (@thoiberg)"
+'''
+
+EXAMPLES='''
+# Installs gems from a Gemfile in the current directory
+- bundler: state=present executable=~/.rvm/gems/2.1.5/bin/bundle
+
+# Excludes the production group from installing
+- bundler: state=present exclude_groups=production
+
+# Only install gems from the default and production groups
+- bundler: state=present deployment_mode=yes
+
+# Installs gems using a Gemfile in another directory
+- bundler: state=present gemfile=../rails_project/Gemfile
+
+# Updates Gemfile in another directory
+- bundler: state=latest chdir=~/rails_project
+'''
+
+
+def get_bundler_executable(module):
+ if module.params.get('executable'):
+ return module.params.get('executable').split(' ')
+ else:
+ return [ module.get_bin_path('bundle', True) ]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(default=None, required=False),
+ state=dict(default='present', required=False, choices=['present', 'latest']),
+ chdir=dict(default=None, required=False),
+ exclude_groups=dict(default=None, required=False, type='list'),
+ clean=dict(default=False, required=False, type='bool'),
+ gemfile=dict(default=None, required=False),
+ local=dict(default=False, required=False, type='bool'),
+ deployment_mode=dict(default=False, required=False, type='bool'),
+ user_install=dict(default=True, required=False, type='bool'),
+ gem_path=dict(default=None, required=False),
+ binstub_directory=dict(default=None, required=False),
+ extra_args=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ executable = module.params.get('executable')
+ state = module.params.get('state')
+ chdir = module.params.get('chdir')
+ exclude_groups = module.params.get('exclude_groups')
+ clean = module.params.get('clean')
+ gemfile = module.params.get('gemfile')
+ local = module.params.get('local')
+ deployment_mode = module.params.get('deployment_mode')
+ user_install = module.params.get('user_install')
+    gem_path = module.params.get('gem_path')
+ binstub_directory = module.params.get('binstub_directory')
+ extra_args = module.params.get('extra_args')
+
+ cmd = get_bundler_executable(module)
+
+ if module.check_mode:
+ cmd.append('check')
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
+
+ module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
+
+ if state == 'present':
+ cmd.append('install')
+ if exclude_groups:
+ cmd.extend(['--without', ':'.join(exclude_groups)])
+ if clean:
+ cmd.append('--clean')
+ if gemfile:
+ cmd.extend(['--gemfile', gemfile])
+ if local:
+ cmd.append('--local')
+ if deployment_mode:
+ cmd.append('--deployment')
+ if not user_install:
+ cmd.append('--system')
+ if gem_path:
+ cmd.extend(['--path', gem_path])
+ if binstub_directory:
+ cmd.extend(['--binstubs', binstub_directory])
+ else:
+ cmd.append('update')
+ if local:
+ cmd.append('--local')
+
+ if extra_args:
+ cmd.extend(extra_args.split(' '))
+
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
+
+ module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
+
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
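
main() ultimately assembles a single bundle command line from the boolean and list options. A rough reconstruction for one combination of options (the bundle path is an assumption; run_command receives this list):

# Hypothetical command for: bundler: state=present deployment_mode=yes exclude_groups=development,test
cmd = ['/usr/local/bin/bundle', 'install']                      # executable from get_bundler_executable()
cmd.extend(['--without', ':'.join(['development', 'test'])])    # exclude_groups
cmd.append('--deployment')                                      # deployment_mode=yes
print(' '.join(cmd))
# /usr/local/bin/bundle install --without development:test --deployment
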
diff --git a/packaging/os/openbsd_pkg.py b/packaging/os/openbsd_pkg.py
index 1b5d0bb0..1f331261 100644
--- a/packaging/os/openbsd_pkg.py
+++ b/packaging/os/openbsd_pkg.py
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# (c) 2013, Patrik Lundin <patrik.lundin.swe@gmail.com>
+# (c) 2013, Patrik Lundin <patrik@sigterm.se>
#
# This file is part of Ansible
#
diff --git a/packaging/os/portage.py b/packaging/os/portage.py
index 2ce0379a..7be55db3 100644
--- a/packaging/os/portage.py
+++ b/packaging/os/portage.py
@@ -254,6 +254,8 @@ def emerge_packages(module, packages):
break
else:
module.exit_json(changed=False, msg='Packages already present.')
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Packages would be installed.')
args = []
emerge_flags = {
@@ -269,14 +271,14 @@ def emerge_packages(module, packages):
'verbose': '--verbose',
'getbinpkg': '--getbinpkg',
'usepkgonly': '--usepkgonly',
+ 'usepkg': '--usepkg',
}
for flag, arg in emerge_flags.iteritems():
if p[flag]:
args.append(arg)
- # usepkgonly implies getbinpkg
- if p['usepkgonly'] and not p['getbinpkg']:
- args.append('--getbinpkg')
+ if p['usepkg'] and p['usepkgonly']:
+ module.fail_json(msg='Use only one of usepkg, usepkgonly')
cmd, (rc, out, err) = run_emerge(module, packages, *args)
if rc != 0:
@@ -298,13 +300,18 @@ def emerge_packages(module, packages):
changed = True
for line in out.splitlines():
if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
+ msg = 'Packages installed.'
+ break
+ elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
+ msg = 'Packages would be installed.'
break
else:
changed = False
+ msg = 'No packages installed.'
module.exit_json(
changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
- msg='Packages installed.',
+ msg=msg,
)
@@ -408,6 +415,7 @@ def main():
sync=dict(default=None, choices=['yes', 'web']),
getbinpkg=dict(default=None, choices=['yes']),
usepkgonly=dict(default=None, choices=['yes']),
+ usepkg=dict(default=None, choices=['yes']),
),
required_one_of=[['package', 'sync', 'depclean']],
mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']],
diff --git a/system/filesystem.py b/system/filesystem.py
index 1e867f30..b44168a0 100644
--- a/system/filesystem.py
+++ b/system/filesystem.py
@@ -41,6 +41,13 @@ options:
description:
- If yes, allows to create new filesystem on devices that already has filesystem.
required: false
+ resizefs:
+ choices: [ "yes", "no" ]
+ default: "no"
+ description:
+    - If yes, and the block device and filesystem sizes differ, grow the filesystem to fill the space. Note that XFS will only grow if mounted.
+ required: false
+ version_added: "2.0"
opts:
description:
- List of options to be passed to mkfs command.
@@ -63,17 +70,68 @@ def main():
dev=dict(required=True, aliases=['device']),
opts=dict(),
force=dict(type='bool', default='no'),
+ resizefs=dict(type='bool', default='no'),
),
supports_check_mode=True,
)
+ # There is no "single command" to manipulate filesystems, so we map them all out and their options
+ fs_cmd_map = {
+ 'ext2' : {
+ 'mkfs' : 'mkfs.ext2',
+ 'grow' : 'resize2fs',
+ 'grow_flag' : None,
+ 'force_flag' : '-F',
+ },
+ 'ext3' : {
+ 'mkfs' : 'mkfs.ext3',
+ 'grow' : 'resize2fs',
+ 'grow_flag' : None,
+ 'force_flag' : '-F',
+ },
+ 'ext4' : {
+ 'mkfs' : 'mkfs.ext4',
+ 'grow' : 'resize2fs',
+ 'grow_flag' : None,
+ 'force_flag' : '-F',
+ },
+ 'ext4dev' : {
+ 'mkfs' : 'mkfs.ext4',
+ 'grow' : 'resize2fs',
+ 'grow_flag' : None,
+ 'force_flag' : '-F',
+ },
+ 'xfs' : {
+ 'mkfs' : 'mkfs.xfs',
+ 'grow' : 'xfs_growfs',
+ 'grow_flag' : None,
+ 'force_flag' : '-f',
+ },
+ 'btrfs' : {
+ 'mkfs' : 'mkfs.btrfs',
+ 'grow' : 'btrfs',
+ 'grow_flag' : 'filesystem resize',
+ 'force_flag' : '-f',
+ }
+ }
+
dev = module.params['dev']
fstype = module.params['fstype']
opts = module.params['opts']
force = module.boolean(module.params['force'])
+ resizefs = module.boolean(module.params['resizefs'])
changed = False
+ try:
+ _ = fs_cmd_map[fstype]
+ except KeyError:
+ module.exit_json(changed=False, msg="WARNING: module does not support this filesystem yet. %s" % fstype)
+
+ mkfscmd = fs_cmd_map[fstype]['mkfs']
+ force_flag = fs_cmd_map[fstype]['force_flag']
+ growcmd = fs_cmd_map[fstype]['grow']
+
if not os.path.exists(dev):
module.fail_json(msg="Device %s not found."%dev)
@@ -82,9 +140,21 @@ def main():
rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
fs = raw_fs.strip()
-
- if fs == fstype:
+ if fs == fstype and resizefs == False:
module.exit_json(changed=False)
+ elif fs == fstype and resizefs == True:
+ cmd = module.get_bin_path(growcmd, required=True)
+ if module.check_mode:
+ module.exit_json(changed=True, msg="May resize filesystem")
+ else:
+ rc,out,err = module.run_command("%s %s" % (cmd, dev))
+ # Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on.
+ # in the future, you would have to parse the output to determine this.
+ # thankfully, these are safe operations if no change is made.
+ if rc == 0:
+ module.exit_json(changed=True, msg=out)
+ else:
+ module.fail_json(msg="Resizing filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err)
elif fs and not force:
module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err)
@@ -93,19 +163,13 @@ def main():
if module.check_mode:
changed = True
else:
- mkfs = module.get_bin_path('mkfs', required=True)
+ mkfs = module.get_bin_path(mkfscmd, required=True)
cmd = None
- if fstype in ['ext2', 'ext3', 'ext4', 'ext4dev']:
- force_flag="-F"
- elif fstype in ['xfs', 'btrfs']:
- force_flag="-f"
- else:
- force_flag=""
if opts is None:
- cmd = "%s -t %s %s '%s'" % (mkfs, fstype, force_flag, dev)
+ cmd = "%s %s '%s'" % (mkfs, force_flag, dev)
else:
- cmd = "%s -t %s %s %s '%s'" % (mkfs, fstype, force_flag, opts, dev)
+ cmd = "%s %s %s '%s'" % (mkfs, force_flag, opts, dev)
rc,_,err = module.run_command(cmd)
if rc == 0:
changed = True
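
With resizefs=yes and a matching filesystem already on the device, the module simply runs that fstype's grow tool from fs_cmd_map against the device. A small sketch of the dispatch (the device path is an example; note that XFS must be mounted to grow):

fs_cmd_map = {
    'ext4': {'mkfs': 'mkfs.ext4', 'grow': 'resize2fs', 'force_flag': '-F'},
    'xfs':  {'mkfs': 'mkfs.xfs',  'grow': 'xfs_growfs', 'force_flag': '-f'},
}

fstype, dev = 'ext4', '/dev/sdb1'          # example values
growcmd = fs_cmd_map[fstype]['grow']
print('%s %s' % (growcmd, dev))            # resize2fs /dev/sdb1 -- what run_command receives
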
diff --git a/system/firewalld.py b/system/firewalld.py
index 37ed1801..04dd4981 100644
--- a/system/firewalld.py
+++ b/system/firewalld.py
@@ -41,6 +41,12 @@ options:
- "Rich rule to add/remove to/from firewalld."
required: false
default: null
+ source:
+ description:
+ - 'The source/network you would like to add/remove to/from firewalld'
+ required: false
+ default: null
+ version_added: "2.0"
zone:
description:
- 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'
@@ -78,6 +84,7 @@ EXAMPLES = '''
- firewalld: port=161-162/udp permanent=true state=enabled
- firewalld: zone=dmz service=http permanent=true state=enabled
- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled
+- firewalld: source='192.168.1.0/24' zone=internal state=enabled
'''
import os
@@ -127,7 +134,27 @@ def set_port_disabled_permanent(zone, port, protocol):
fw_settings = fw_zone.getSettings()
fw_settings.removePort(port, protocol)
fw_zone.update(fw_settings)
-
+
+####################
+# source handling
+#
+def get_source(zone, source):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+ if source in fw_settings.getSources():
+ return True
+ else:
+ return False
+
+def add_source(zone, source):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+    fw_settings.addSource(source)
+    fw_zone.update(fw_settings)
+
+def remove_source(zone, source):
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+    fw_settings.removeSource(source)
+    fw_zone.update(fw_settings)
####################
# service handling
@@ -209,13 +236,16 @@ def main():
port=dict(required=False,default=None),
rich_rule=dict(required=False,default=None),
zone=dict(required=False,default=None),
- permanent=dict(type='bool',required=True),
immediate=dict(type='bool',default=False),
+ source=dict(required=False,default=None),
+ permanent=dict(type='bool',required=False,default=None),
state=dict(choices=['enabled', 'disabled'], required=True),
timeout=dict(type='int',required=False,default=0),
),
supports_check_mode=True
)
+ if module.params['source'] == None and module.params['permanent'] == None:
+        module.fail_json(msg='permanent is a required parameter')
if not HAS_FIREWALLD:
module.fail_json(msg='firewalld required for this module')
@@ -229,6 +259,7 @@ def main():
msgs = []
service = module.params['service']
rich_rule = module.params['rich_rule']
+ source = module.params['source']
if module.params['port'] != None:
port, protocol = module.params['port'].split('/')
@@ -308,6 +339,24 @@ def main():
if changed == True:
msgs.append("Changed service %s to %s" % (service, desired_state))
+ if source != None:
+ is_enabled = get_source(zone, source)
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ add_source(zone, source)
+ changed=True
+ msgs.append("Added %s to zone %s" % (source, zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ remove_source(zone, source)
+ changed=True
+ msgs.append("Removed %s from zone %s" % (source, zone))
if port != None:
if permanent:
is_enabled = get_port_enabled_permanent(zone, [port, protocol])
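
The source handlers above go through firewalld's permanent zone configuration objects. A hypothetical direct use of the same bindings (assumes the python firewall client this module already requires; the zone and network are the example values from EXAMPLES):

from firewall.client import FirewallClient

fw = FirewallClient()
fw_zone = fw.config().getZoneByName('internal')
fw_settings = fw_zone.getSettings()
if '192.168.1.0/24' not in fw_settings.getSources():
    fw_settings.addSource('192.168.1.0/24')
    fw_zone.update(fw_settings)   # persist the modified settings, as the other permanent helpers do
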
diff --git a/system/gluster_volume.py b/system/gluster_volume.py
index 77190065..ff1ce983 100644
--- a/system/gluster_volume.py
+++ b/system/gluster_volume.py
@@ -115,7 +115,7 @@ EXAMPLES = """
gluster_volume: state=present name=test1 options='{performance.cache-size: 256MB}'
- name: start gluster volume
- gluster_volume: status=started name=test1
+ gluster_volume: state=started name=test1
- name: limit usage
gluster_volume: state=present name=test1 directory=/foo quota=20.0MB
diff --git a/system/zfs.py b/system/zfs.py
index c3c87634..51b9db63 100644
--- a/system/zfs.py
+++ b/system/zfs.py
@@ -115,6 +115,11 @@ options:
- The normalization property.
required: False
choices: [none,formC,formD,formKC,formKD]
+ origin:
+ description:
+ - Name of the snapshot to clone
+ required: False
+ version_added: "2.0"
primarycache:
description:
- The primarycache property.
@@ -221,6 +226,12 @@ EXAMPLES = '''
# Create a new file system called myfs2 with snapdir enabled
- zfs: name=rpool/myfs2 state=present snapdir=enabled
+
+# Create a new file system by cloning a snapshot
+- zfs: name=rpool/cloned_fs state=present origin=rpool/myfs@mysnapshot
+
+# Destroy a filesystem
+- zfs: name=rpool/myfs state=absent
'''
@@ -253,8 +264,11 @@ class Zfs(object):
properties = self.properties
volsize = properties.pop('volsize', None)
volblocksize = properties.pop('volblocksize', None)
+ origin = properties.pop('origin', None)
if "@" in self.name:
action = 'snapshot'
+ elif origin:
+ action = 'clone'
else:
action = 'create'
@@ -272,6 +286,8 @@ class Zfs(object):
if volsize:
cmd.append('-V')
cmd.append(volsize)
+ if origin:
+ cmd.append(origin)
cmd.append(self.name)
(rc, err, out) = self.module.run_command(' '.join(cmd))
if rc == 0:
@@ -360,6 +376,7 @@ def main():
'mountpoint': {'required': False},
'nbmand': {'required': False, 'choices':['on', 'off']},
'normalization': {'required': False, 'choices':['none', 'formC', 'formD', 'formKC', 'formKD']},
+ 'origin': {'required': False},
'primarycache': {'required': False, 'choices':['all', 'none', 'metadata']},
'quota': {'required': False},
'readonly': {'required': False, 'choices':['on', 'off']},
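Since the zfs change above derives the action from the dataset name and the new origin property ("@" in the name creates a snapshot, a supplied origin creates a clone), a snapshot-then-clone sequence might look like this hedged sketch; pool and dataset names are illustrative:

- zfs: name=rpool/myfs@before_upgrade state=present
- zfs: name=rpool/cloned_fs state=present origin=rpool/myfs@before_upgrade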
diff --git a/test-docs.sh b/test-docs.sh
new file mode 100755
index 00000000..76297fba
--- /dev/null
+++ b/test-docs.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+set -x
+
+CHECKOUT_DIR=".ansible-checkout"
+MOD_REPO="$1"
+
+# Hidden file to avoid the module_formatter recursing into the checkout
+git clone https://github.com/ansible/ansible "$CHECKOUT_DIR"
+cd "$CHECKOUT_DIR"
+git submodule update --init
+rm -rf "lib/ansible/modules/$MOD_REPO"
+ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO"
+
+pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx
+
+. ./hacking/env-setup
+PAGER=/bin/cat bin/ansible-doc -l
+if [ $? -ne 0 ] ; then
+  exit 1
+fi
+make -C docsite
diff --git a/windows/win_iis_virtualdirectory.ps1 b/windows/win_iis_virtualdirectory.ps1
new file mode 100644
index 00000000..3f2ab692
--- /dev/null
+++ b/windows/win_iis_virtualdirectory.ps1
@@ -0,0 +1,128 @@
+#!powershell
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+# Name parameter
+$name = Get-Attr $params "name" $FALSE;
+If ($name -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: name";
+}
+
+# Site
+$site = Get-Attr $params "site" $FALSE;
+If ($site -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: site";
+}
+
+# Application
+$application = Get-Attr $params "application" $FALSE;
+
+# State parameter
+$state = Get-Attr $params "state" "present";
+If (($state -ne 'present') -and ($state -ne 'absent')) {
+ Fail-Json $result "state is '$state'; must be 'present' or 'absent'"
+}
+
+# Path parameter
+$physical_path = Get-Attr $params "physical_path" $FALSE;
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) {
+ Import-Module WebAdministration
+}
+
+# Result
+$result = New-Object psobject @{
+ directory = New-Object psobject
+ changed = $false
+};
+
+# Construct path
+$directory_path = if($application) {
+ "IIS:\Sites\$($site)\$($application)\$($name)"
+} else {
+ "IIS:\Sites\$($site)\$($name)"
+}
+
+# Directory info
+$directory = Get-WebVirtualDirectory -Site $site -Name $name
+
+try {
+ # Add directory
+ If(($state -eq 'present') -and (-not $directory)) {
+ If ($physical_path -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required arguments: physical_path"
+ }
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $directory_parameters = New-Object psobject @{
+ Site = $site
+ Name = $name
+ PhysicalPath = $physical_path
+ };
+
+ If ($application) {
+ $directory_parameters.Application = $application
+ }
+
+ $directory = New-WebVirtualDirectory @directory_parameters -Force
+ $result.changed = $true
+ }
+
+ # Remove directory
+ If ($state -eq 'absent' -and $directory) {
+ Remove-Item $directory_path
+ $result.changed = $true
+ }
+
+ $directory = Get-WebVirtualDirectory -Site $site -Name $name
+ If($directory) {
+
+ # Change Physical Path if needed
+ if($physical_path) {
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $vdir_folder = Get-Item $directory.PhysicalPath
+ $folder = Get-Item $physical_path
+ If($folder.FullName -ne $vdir_folder.FullName) {
+ Set-ItemProperty $directory_path -name physicalPath -value $physical_path
+ $result.changed = $true
+ }
+ }
+ }
+} catch {
+ Fail-Json $result $_.Exception.Message
+}
+
+# Result
+$directory = Get-WebVirtualDirectory -Site $site -Name $name
+$result.directory = New-Object psobject @{
+ PhysicalPath = $directory.PhysicalPath
+}
+
+Exit-Json $result
diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py
new file mode 100644
index 00000000..1ccb34a6
--- /dev/null
+++ b/windows/win_iis_virtualdirectory.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: win_iis_virtualdirectory
+version_added: "2.0"
+short_description: Configures an IIS virtual directory.
+description:
+  - Creates, Removes and configures an IIS virtual directory
+options:
+ name:
+ description:
+ - The name of the virtual directory to create or remove
+ required: true
+ state:
+ description:
+ - Whether to add or remove the specified virtual directory
+ choices:
+ - absent
+ - present
+ required: false
+ default: null
+ site:
+ description:
+ - The site name under which the virtual directory is created or exists.
+ required: false
+ default: null
+ application:
+ description:
+ - The application under which the virtual directory is created or exists.
+ required: false
+ default: null
+ physical_path:
+ description:
+ - The physical path to the folder in which the new virtual directory is created. The specified folder must already exist.
+ required: false
+ default: null
+author: Henrik Wallström
+'''
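A hedged task sketch for the win_iis_virtualdirectory options documented above (the stub ships without an EXAMPLES block); the site, directory and path names are illustrative, and per the accompanying .ps1 the physical_path folder must already exist on the host:

- name: Add a downloads virtual directory to the acme site
  win_iis_virtualdirectory:
    name: downloads
    site: acme
    physical_path: c:\sites\acme\downloads
    state: present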
diff --git a/windows/win_iis_webapplication.ps1 b/windows/win_iis_webapplication.ps1
new file mode 100644
index 00000000..e576dd50
--- /dev/null
+++ b/windows/win_iis_webapplication.ps1
@@ -0,0 +1,132 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+# Name parameter
+$name = Get-Attr $params "name" $FALSE;
+If ($name -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: name";
+}
+
+# Site
+$site = Get-Attr $params "site" $FALSE;
+If ($site -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: site";
+}
+
+# State parameter
+$state = Get-Attr $params "state" "present";
+$state = $state.ToString().ToLower();
+If (($state -ne 'present') -and ($state -ne 'absent')) {
+ Fail-Json $result "state is '$state'; must be 'present' or 'absent'"
+}
+
+# Path parameter
+$physical_path = Get-Attr $params "physical_path" $FALSE;
+
+# Application Pool Parameter
+$application_pool = Get-Attr $params "application_pool" $FALSE;
+
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) {
+ Import-Module WebAdministration
+}
+
+# Result
+$result = New-Object psobject @{
+ application = New-Object psobject
+ changed = $false
+};
+
+# Application info
+$application = Get-WebApplication -Site $site -Name $name
+
+try {
+ # Add application
+ If(($state -eq 'present') -and (-not $application)) {
+ If ($physical_path -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required arguments: physical_path"
+ }
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $application_parameters = New-Object psobject @{
+ Site = $site
+ Name = $name
+ PhysicalPath = $physical_path
+ };
+
+ If ($application_pool) {
+ $application_parameters.ApplicationPool = $application_pool
+ }
+
+ $application = New-WebApplication @application_parameters -Force
+ $result.changed = $true
+
+ }
+
+ # Remove application
+ if ($state -eq 'absent' -and $application) {
+ $application = Remove-WebApplication -Site $site -Name $name
+ $result.changed = $true
+ }
+
+ $application = Get-WebApplication -Site $site -Name $name
+ If($application) {
+
+ # Change Physical Path if needed
+ if($physical_path) {
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $app_folder = Get-Item $application.PhysicalPath
+ $folder = Get-Item $physical_path
+ If($folder.FullName -ne $app_folder.FullName) {
+ Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name physicalPath -value $physical_path
+ $result.changed = $true
+ }
+ }
+
+ # Change Application Pool if needed
+ if($application_pool) {
+ If($application_pool -ne $application.applicationPool) {
+ Set-ItemProperty "IIS:\Sites\$($site)\$($name)" -name applicationPool -value $application_pool
+ $result.changed = $true
+ }
+ }
+ }
+} catch {
+ Fail-Json $result $_.Exception.Message
+}
+
+# Result
+$application = Get-WebApplication -Site $site -Name $name
+$result.application = New-Object psobject @{
+ PhysicalPath = $application.PhysicalPath
+ ApplicationPool = $application.applicationPool
+}
+
+Exit-Json $result
diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py
new file mode 100644
index 00000000..b8ebd085
--- /dev/null
+++ b/windows/win_iis_webapplication.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: win_iis_webapplication
+version_added: "2.0"
+short_description: Configures an IIS Web application.
+description:
+  - Creates, Removes and configures an IIS Web application
+options:
+ name:
+ description:
+      - Name of the Web application
+ required: true
+ default: null
+ aliases: []
+ site:
+ description:
+ - Name of the site on which the application is created.
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - State of the web application
+ choices:
+ - present
+ - absent
+ required: false
+ default: null
+ aliases: []
+ physical_path:
+ description:
+      - The physical path on the remote host to use for the new application. The specified folder must already exist.
+ required: false
+ default: null
+ aliases: []
+ application_pool:
+ description:
+ - The application pool in which the new site executes.
+ required: false
+ default: null
+ aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+$ ansible -i hosts -m win_iis_webapplication -a "name=api site=acme physical_path=c:\\apps\\acme\\api" host
+
+'''
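A playbook-style equivalent of the ad-hoc example above, as a hedged sketch; the application, site and path values are the same illustrative ones, and state and physical_path behave as documented:

- name: Acme API web application
  win_iis_webapplication:
    name: api
    site: acme
    physical_path: c:\apps\acme\api
    state: present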
diff --git a/windows/win_iis_webapppool.ps1 b/windows/win_iis_webapppool.ps1
new file mode 100644
index 00000000..2ed369e4
--- /dev/null
+++ b/windows/win_iis_webapppool.ps1
@@ -0,0 +1,112 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+# Name parameter
+$name = Get-Attr $params "name" $FALSE;
+If ($name -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: name";
+}
+
+# State parameter
+$state = Get-Attr $params "state" $FALSE;
+$valid_states = ('started', 'restarted', 'stopped', 'absent');
+If (($state -Ne $FALSE) -And ($state -NotIn $valid_states)) {
+ Fail-Json $result "state is '$state'; must be $($valid_states)"
+}
+
+# Attributes parameter - Pipe separated list of attributes where
+# keys and values are separated by a colon (paramA:valueA|paramB:valueB)
+$attributes = @{};
+If ($params.attributes) {
+ $params.attributes -split '\|' | foreach {
+ $key, $value = $_ -split "\:";
+ $attributes.Add($key, $value);
+ }
+}
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $NULL){
+ Import-Module WebAdministration
+}
+
+# Result
+$result = New-Object psobject @{
+ changed = $FALSE
+ attributes = $attributes
+};
+
+# Get pool
+$pool = Get-Item IIS:\AppPools\$name
+
+try {
+ # Add
+ if (-not $pool -and $state -in ('started', 'stopped', 'restarted')) {
+ New-WebAppPool $name
+ $result.changed = $TRUE
+ }
+
+ # Remove
+ if ($pool -and $state -eq 'absent') {
+ Remove-WebAppPool $name
+ $result.changed = $TRUE
+ }
+
+ $pool = Get-Item IIS:\AppPools\$name
+ if($pool) {
+ # Set properties
+ $attributes.GetEnumerator() | foreach {
+ $newParameter = $_;
+ $currentParameter = Get-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key
+ if(-not $currentParameter -or ($currentParameter.Value -as [String]) -ne $newParameter.Value) {
+ Set-ItemProperty ("IIS:\AppPools\" + $name) $newParameter.Key $newParameter.Value
+ $result.changed = $TRUE
+ }
+ }
+
+ # Set run state
+ if (($state -eq 'stopped') -and ($pool.State -eq 'Started')) {
+ Stop-WebAppPool -Name $name -ErrorAction Stop
+ $result.changed = $TRUE
+ }
+ if ((($state -eq 'started') -and ($pool.State -eq 'Stopped')) -or ($state -eq 'restarted')) {
+ Start-WebAppPool -Name $name -ErrorAction Stop
+ $result.changed = $TRUE
+ }
+ }
+} catch {
+ Fail-Json $result $_.Exception.Message
+}
+
+# Result
+$pool = Get-Item IIS:\AppPools\$name
+$result.info = @{
+ name = $pool.Name
+ state = $pool.State
+ attributes = New-Object psobject @{}
+};
+
+$pool.Attributes | ForEach { $result.info.attributes.Add($_.Name, $_.Value)};
+
+Exit-Json $result
diff --git a/windows/win_iis_webapppool.py b/windows/win_iis_webapppool.py
new file mode 100644
index 00000000..c77c3b04
--- /dev/null
+++ b/windows/win_iis_webapppool.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: win_iis_webapppool
+version_added: "2.0"
+short_description: Configures an IIS Web Application Pool.
+description:
+  - Creates, Removes and configures an IIS Web Application Pool
+options:
+ name:
+ description:
+      - Name of the application pool
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+      - State of the application pool
+ choices:
+ - absent
+ - stopped
+ - started
+ - restarted
+ required: false
+ default: null
+ aliases: []
+ attributes:
+ description:
+      - Application Pool attributes from a string where attributes are separated by a pipe and attribute name/values by a colon, e.g. "foo:1|bar:2"
+ required: false
+ default: null
+ aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This returns information about an existing application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='DefaultAppPool'" windows
+host | success >> {
+ "attributes": {},
+ "changed": false,
+ "info": {
+ "attributes": {
+ "CLRConfigFile": "",
+ "applicationPoolSid": "S-1-5-82-3006700770-424185619-1745488364-794895919-4004696415",
+ "autoStart": true,
+ "enable32BitAppOnWin64": false,
+ "enableConfigurationOverride": true,
+ "managedPipelineMode": 0,
+ "managedRuntimeLoader": "webengine4.dll",
+ "managedRuntimeVersion": "v4.0",
+ "name": "DefaultAppPool",
+ "passAnonymousToken": true,
+ "queueLength": 1000,
+ "startMode": 0,
+ "state": 1
+ },
+ "name": "DefaultAppPool",
+ "state": "Started"
+ }
+}
+
+# This creates a new application pool in 'Started' state
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=started" windows
+
+# This stops an application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=stopped" windows
+
+# This restarts an application pool
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=restarted" windows
+
+# This changes application pool attributes without touching the state
+$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
+
+# This creates an application pool and sets attributes
+$ ansible -i inventory -m win_iis_webapppool -a "name='AnotherAppPool' state=started attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
+
+
+# Playbook example
+---
+
+- name: App Pool with .NET 4.0
+ win_iis_webapppool:
+ name: 'AppPool'
+ state: started
+ attributes: managedRuntimeVersion:v4.0
+ register: webapppool
+
+'''
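A hedged playbook sketch of the pipe/colon attribute format documented above, reusing the two attributes from the ad-hoc examples; the pool name is illustrative:

- name: App Pool with several attributes
  win_iis_webapppool:
    name: 'AppPool'
    state: started
    attributes: 'managedRuntimeVersion:v4.0|autoStart:false'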
diff --git a/windows/win_iis_webbinding.ps1 b/windows/win_iis_webbinding.ps1
new file mode 100644
index 00000000..bdff43fc
--- /dev/null
+++ b/windows/win_iis_webbinding.ps1
@@ -0,0 +1,138 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+# Name parameter
+$name = Get-Attr $params "name" $FALSE;
+If ($name -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: name";
+}
+
+# State parameter
+$state = Get-Attr $params "state" $FALSE;
+$valid_states = ($FALSE, 'present', 'absent');
+If ($state -NotIn $valid_states) {
+ Fail-Json $result "state is '$state'; must be $($valid_states)"
+}
+
+$binding_parameters = New-Object psobject @{
+ Name = $name
+};
+
+If ($params.host_header) {
+ $binding_parameters.HostHeader = $params.host_header
+}
+
+If ($params.protocol) {
+ $binding_parameters.Protocol = $params.protocol
+}
+
+If ($params.port) {
+ $binding_parameters.Port = $params.port
+}
+
+If ($params.ip) {
+ $binding_parameters.IPAddress = $params.ip
+}
+
+$certificateHash = Get-Attr $params "certificate_hash" $FALSE;
+$certificateStoreName = Get-Attr $params "certificate_store_name" "MY";
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null){
+ Import-Module WebAdministration
+}
+
+function Create-Binding-Info {
+ return New-Object psobject @{
+ "bindingInformation" = $args[0].bindingInformation
+ "certificateHash" = $args[0].certificateHash
+ "certificateStoreName" = $args[0].certificateStoreName
+ "isDsMapperEnabled" = $args[0].isDsMapperEnabled
+ "protocol" = $args[0].protocol
+ "sslFlags" = $args[0].sslFlags
+ }
+}
+
+# Result
+$result = New-Object psobject @{
+ changed = $false
+ parameters = $binding_parameters
+ matched = @()
+ removed = @()
+ added = @()
+};
+
+# Get bindings matching parameters
+$curent_bindings = Get-WebBinding @binding_parameters
+$curent_bindings | Foreach {
+ $result.matched += Create-Binding-Info $_
+}
+
+try {
+ # Add
+ if (-not $curent_bindings -and $state -eq 'present') {
+ New-WebBinding @binding_parameters -Force
+
+ # Select certificat
+ if($certificateHash -ne $FALSE) {
+
+ $ip = $binding_parameters.IPAddress
+ if((!$ip) -or ($ip -eq "*")) {
+ $ip = "0.0.0.0"
+ }
+
+ $port = $binding_parameters.Port
+ if(!$port) {
+ $port = 443
+ }
+
+ $result.port = $port
+ $result.ip = $ip
+
+ Push-Location IIS:\SslBindings\
+ Get-Item Cert:\LocalMachine\$certificateStoreName\$certificateHash | New-Item "$($ip)!$($port)"
+ Pop-Location
+ }
+
+ $result.added += Create-Binding-Info (Get-WebBinding @binding_parameters)
+ $result.changed = $true
+ }
+
+ # Remove
+ if ($curent_bindings -and $state -eq 'absent') {
+ $curent_bindings | foreach {
+ Remove-WebBinding -InputObject $_
+ $result.removed += Create-Binding-Info $_
+ }
+ $result.changed = $true
+ }
+
+
+}
+catch {
+ Fail-Json $result $_.Exception.Message
+}
+
+Exit-Json $result
diff --git a/windows/win_iis_webbinding.py b/windows/win_iis_webbinding.py
new file mode 100644
index 00000000..061bed73
--- /dev/null
+++ b/windows/win_iis_webbinding.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: win_iis_webbinding
+version_added: "2.0"
+short_description: Configures an IIS Web site binding.
+description:
+ - Creates, Removes and configures a binding to an existing IIS Web site
+options:
+ name:
+ description:
+      - Name of the web site
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - State of the binding
+ choices:
+ - present
+ - absent
+ required: false
+ default: null
+ aliases: []
+ port:
+ description:
+ - The port to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ ip:
+ description:
+ - The IP address to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ host_header:
+ description:
+ - The host header to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ protocol:
+ description:
+ - The protocol to be used for the Web binding (usually HTTP, HTTPS, or FTP).
+ required: false
+ default: null
+ aliases: []
+ certificate_hash:
+ description:
+ - Certificate hash for the SSL binding. The certificate hash is the unique identifier for the certificate.
+ required: false
+ default: null
+ aliases: []
+ certificate_store_name:
+ description:
+ - Name of the certificate store where the certificate for the binding is located.
+ required: false
+ default: "My"
+ aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This will return binding information for an existing host
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site'" windows
+host | success >> {
+ "added": [],
+ "changed": false,
+ "matched": [
+ {
+ "bindingInformation": "*:80:",
+ "certificateHash": "",
+ "certificateStoreName": "",
+ "isDsMapperEnabled": false,
+ "protocol": "http",
+ "sslFlags": 0
+ }
+ ],
+ "parameters": {
+ "Name": "Default Web Site"
+ },
+ "removed": []
+}
+
+# This will return the HTTPS binding information for an existing host
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https" windows
+
+# This will add a HTTP binding on port 9090
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=present" windows
+
+# This will remove the HTTP binding on port 9090
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' port=9090 state=absent" windows
+
+# This will add a HTTPS binding
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https state=present" windows
+
+# This will add a HTTPS binding and select certificate to use
+$ ansible -i vagrant-inventory -m win_iis_webbinding -a "name='Default Web Site' protocol=https certificate_hash=B0D0FA8408FC67B230338FCA584D03792DA73F4C" windows
+
+
+# Playbook example
+---
+
+- name: Website http/https bindings
+ win_iis_webbinding:
+ name: "Default Web Site"
+ protocol: https
+ port: 443
+ certificate_hash: "D1A3AF8988FD32D1A3AF8988FD323792DA73F4C"
+ state: present
+ when: monitor_use_https
+
+'''
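A hedged sketch of removing a binding with the documented options; the site, protocol and port values are illustrative:

- name: Remove the plain HTTP binding
  win_iis_webbinding:
    name: "Default Web Site"
    protocol: http
    port: 80
    state: absent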
diff --git a/windows/win_iis_website.ps1 b/windows/win_iis_website.ps1
new file mode 100644
index 00000000..bba1e941
--- /dev/null
+++ b/windows/win_iis_website.ps1
@@ -0,0 +1,179 @@
+#!powershell
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+# Name parameter
+$name = Get-Attr $params "name" $FALSE;
+If ($name -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required argument: name";
+}
+
+# State parameter
+$state = Get-Attr $params "state" $FALSE;
+$state = $state.ToString().ToLower();
+If (($state -ne $FALSE) -and ($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted') -and ($state -ne 'absent')) {
+ Fail-Json (New-Object psobject) "state is '$state'; must be 'started', 'restarted', 'stopped' or 'absent'"
+}
+
+# Path parameter
+$physical_path = Get-Attr $params "physical_path" $FALSE;
+
+# Application Pool Parameter
+$application_pool = Get-Attr $params "application_pool" $FALSE;
+
+# Binding Parameters
+$bind_port = Get-Attr $params "port" $FALSE;
+$bind_ip = Get-Attr $params "ip" $FALSE;
+$bind_hostname = Get-Attr $params "hostname" $FALSE;
+$bind_ssl = Get-Attr $params "ssl" $FALSE;
+
+# Custom site Parameters from string where properties
+# are separated by a pipe and property name/values by a colon.
+# Ex. "foo:1|bar:2"
+$parameters = Get-Attr $params "parameters" $null;
+if($parameters -ne $null) {
+ $parameters = @($parameters -split '\|' | ForEach {
+ return ,($_ -split "\:", 2);
+ })
+}
+
+
+# Ensure WebAdministration module is loaded
+if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null) {
+ Import-Module WebAdministration
+}
+
+# Result
+$result = New-Object psobject @{
+ site = New-Object psobject
+ changed = $false
+};
+
+# Site info
+$site = Get-Website -Name $name
+
+Try {
+ # Add site
+ If(($state -ne 'absent') -and (-not $site)) {
+ If ($physical_path -eq $FALSE) {
+ Fail-Json (New-Object psobject) "missing required arguments: physical_path"
+ }
+ ElseIf (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $site_parameters = New-Object psobject @{
+ Name = $name
+ PhysicalPath = $physical_path
+ };
+
+ If ($application_pool) {
+ $site_parameters.ApplicationPool = $application_pool
+ }
+
+ If ($bind_port) {
+ $site_parameters.Port = $bind_port
+ }
+
+ If ($bind_ip) {
+ $site_parameters.IPAddress = $bind_ip
+ }
+
+ If ($bind_hostname) {
+ $site_parameters.HostHeader = $bind_hostname
+ }
+
+ $site = New-Website @site_parameters -Force
+ $result.changed = $true
+ }
+
+ # Remove site
+ If ($state -eq 'absent' -and $site) {
+ $site = Remove-Website -Name $name
+ $result.changed = $true
+ }
+
+ $site = Get-Website -Name $name
+ If($site) {
+ # Change Physical Path if needed
+ if($physical_path) {
+ If (-not (Test-Path $physical_path)) {
+ Fail-Json (New-Object psobject) "specified folder must already exist: physical_path"
+ }
+
+ $folder = Get-Item $physical_path
+ If($folder.FullName -ne $site.PhysicalPath) {
+ Set-ItemProperty "IIS:\Sites\$($site.Name)" -name physicalPath -value $folder.FullName
+ $result.changed = $true
+ }
+ }
+
+ # Change Application Pool if needed
+ if($application_pool) {
+ If($application_pool -ne $site.applicationPool) {
+ Set-ItemProperty "IIS:\Sites\$($site.Name)" -name applicationPool -value $application_pool
+ $result.changed = $true
+ }
+ }
+
+ # Set properties
+ if($parameters) {
+ $parameters | foreach {
+ $parameter_value = Get-ItemProperty "IIS:\Sites\$($site.Name)" $_[0]
+ if((-not $parameter_value) -or ($parameter_value.Value -as [String]) -ne $_[1]) {
+ Set-ItemProperty "IIS:\Sites\$($site.Name)" $_[0] $_[1]
+ $result.changed = $true
+ }
+ }
+ }
+
+ # Set run state
+ if (($state -eq 'stopped') -and ($site.State -eq 'Started'))
+ {
+ Stop-Website -Name $name -ErrorAction Stop
+ $result.changed = $true
+ }
+ if ((($state -eq 'started') -and ($site.State -eq 'Stopped')) -or ($state -eq 'restarted'))
+ {
+ Start-Website -Name $name -ErrorAction Stop
+ $result.changed = $true
+ }
+ }
+}
+Catch
+{
+ Fail-Json (New-Object psobject) $_.Exception.Message
+}
+
+$site = Get-Website -Name $name
+$result.site = New-Object psobject @{
+ Name = $site.Name
+ ID = $site.ID
+ State = $site.State
+ PhysicalPath = $site.PhysicalPath
+ ApplicationPool = $site.applicationPool
+ Bindings = @($site.Bindings.Collection | ForEach-Object { $_.BindingInformation })
+}
+
+
+Exit-Json $result
diff --git a/windows/win_iis_website.py b/windows/win_iis_website.py
new file mode 100644
index 00000000..8921afe5
--- /dev/null
+++ b/windows/win_iis_website.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: win_iis_website
+version_added: "2.0"
+short_description: Configures an IIS Web site.
+description:
+  - Creates, Removes and configures an IIS Web site
+options:
+ name:
+ description:
+      - Name of the web site
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - State of the web site
+ choices:
+ - started
+ - restarted
+ - stopped
+ - absent
+ required: false
+ default: null
+ aliases: []
+ physical_path:
+ description:
+ - The physical path on the remote host to use for the new site. The specified folder must already exist.
+ required: false
+ default: null
+ aliases: []
+ application_pool:
+ description:
+ - The application pool in which the new site executes.
+ required: false
+ default: null
+ aliases: []
+ port:
+ description:
+ - The port to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ ip:
+ description:
+ - The IP address to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ hostname:
+ description:
+ - The host header to bind to / use for the new site.
+ required: false
+ default: null
+ aliases: []
+ ssl:
+ description:
+      - Enables HTTPS binding on the site.
+ required: false
+ default: null
+ aliases: []
+ parameters:
+ description:
+      - Custom site parameters from a string where properties are separated by a pipe and property name/values by a colon, e.g. "foo:1|bar:2"
+ required: false
+ default: null
+ aliases: []
+author: Henrik Wallström
+'''
+
+EXAMPLES = '''
+# This returns information about an existing site
+$ ansible -i vagrant-inventory -m win_iis_website -a "name='Default Web Site'" windows
+host | success >> {
+ "changed": false,
+ "site": {
+ "ApplicationPool": "DefaultAppPool",
+ "Bindings": [
+ "*:80:"
+ ],
+ "ID": 1,
+ "Name": "Default Web Site",
+ "PhysicalPath": "%SystemDrive%\\inetpub\\wwwroot",
+ "State": "Stopped"
+ }
+}
+
+# This stops an existing site.
+$ ansible -i hosts -m win_iis_website -a "name='Default Web Site' state=stopped" host
+
+# This creates a new site.
+$ ansible -i hosts -m win_iis_website -a "name=acme physical_path=c:\\sites\\acme" host
+
+# Change the logfile directory.
+$ ansible -i hosts -m win_iis_website -a "name=acme parameters='logfile.directory:c:\\sites\\logs'" host
+
+
+# Playbook example
+---
+
+- name: Acme IIS site
+ win_iis_website:
+ name: "Acme"
+ state: started
+ port: 80
+ ip: 127.0.0.1
+ hostname: acme.local
+ application_pool: "acme"
+ physical_path: 'c:\\sites\\acme'
+ parameters: 'logfile.directory:c:\\sites\\logs'
+ register: website
+
+'''
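A hedged sketch of the documented absent state, which removes a site entirely; the site name is illustrative:

- name: Remove the Acme site
  win_iis_website:
    name: "Acme"
    state: absent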
diff --git a/windows/win_regedit.ps1 b/windows/win_regedit.ps1
new file mode 100644
index 00000000..1a257413
--- /dev/null
+++ b/windows/win_regedit.ps1
@@ -0,0 +1,190 @@
+#!powershell
+# This file is part of Ansible
+#
+# (c) 2015, Adam Keech <akeech@chathamfinancial.com>, Josh Ludwig <jludwig@chathamfinancial.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+$ErrorActionPreference = "Stop"
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+If ($params.key)
+{
+ $registryKey = $params.key
+}
+Else
+{
+ Fail-Json $result "missing required argument: key"
+}
+
+If ($params.value)
+{
+ $registryValue = $params.value
+}
+Else
+{
+ $registryValue = $null
+}
+
+If ($params.state)
+{
+ $state = $params.state.ToString().ToLower()
+ If (($state -ne "present") -and ($state -ne "absent"))
+ {
+ Fail-Json $result "state is $state; must be present or absent"
+ }
+}
+Else
+{
+ $state = "present"
+}
+
+If ($params.data)
+{
+ $registryData = $params.data
+}
+ElseIf ($state -eq "present" -and $registryValue -ne $null)
+{
+ Fail-Json $result "missing required argument: data"
+}
+
+If ($params.datatype)
+{
+ $registryDataType = $params.datatype.ToString().ToLower()
+ $validRegistryDataTypes = "binary", "dword", "expandstring", "multistring", "string", "qword"
+ If ($validRegistryDataTypes -notcontains $registryDataType)
+ {
+ Fail-Json $result "type is $registryDataType; must be binary, dword, expandstring, multistring, string, or qword"
+ }
+}
+Else
+{
+ $registryDataType = "string"
+}
+
+Function Test-RegistryValueData {
+ Param (
+ [parameter(Mandatory=$true)]
+ [ValidateNotNullOrEmpty()]$Path,
+ [parameter(Mandatory=$true)]
+ [ValidateNotNullOrEmpty()]$Value
+ )
+ Try {
+ Get-ItemProperty -Path $Path -Name $Value
+ Return $true
+ }
+ Catch {
+ Return $false
+ }
+}
+
+if($state -eq "present") {
+ if ((Test-Path $registryKey) -and $registryValue -ne $null)
+ {
+ if (Test-RegistryValueData -Path $registryKey -Value $registryValue)
+ {
+ # Changes Data and DataType
+ if ((Get-Item $registryKey).GetValueKind($registryValue) -ne $registryDataType)
+ {
+ Try
+ {
+ Remove-ItemProperty -Path $registryKey -Name $registryValue
+ New-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData -PropertyType $registryDataType
+ $result.changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ # Changes Only Data
+ elseif ((Get-ItemProperty -Path $registryKey | Select-Object -ExpandProperty $registryValue) -ne $registryData)
+ {
+ Try {
+ Set-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData
+ $result.changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ }
+ else
+ {
+ Try
+ {
+ New-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData -PropertyType $registryDataType
+ $result.changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ }
+ elseif(-not (Test-Path $registryKey))
+ {
+ Try
+ {
+ $newRegistryKey = New-Item $registryKey -Force
+ $result.changed = $true
+
+ if($registryValue -ne $null) {
+ $newRegistryKey | New-ItemProperty -Name $registryValue -Value $registryData -Force -PropertyType $registryDataType
+ $result.changed = $true
+ }
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+}
+else
+{
+ if (Test-Path $registryKey)
+ {
+ if ($registryValue -eq $null) {
+ Try
+ {
+ Remove-Item -Path $registryKey -Recurse
+ $result.changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ elseif (Test-RegistryValueData -Path $registryKey -Value $registryValue) {
+ Try
+ {
+ Remove-ItemProperty -Path $registryKey -Name $registryValue
+ $result.changed = $true
+ }
+ Catch
+ {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ }
+}
+
+Exit-Json $result
diff --git a/windows/win_regedit.py b/windows/win_regedit.py
new file mode 100644
index 00000000..5087a5ea
--- /dev/null
+++ b/windows/win_regedit.py
@@ -0,0 +1,109 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Keech <akeech@chathamfinancial.com>, Josh Ludwig <jludwig@chathamfinancial.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_regedit
+version_added: "2.0"
+short_description: Add, Edit, or Remove Registry Keys and Values
+description:
+ - Add, Edit, or Remove Registry Keys and Values using ItemProperties Cmdlets
+options:
+ key:
+ description:
+ - Name of Registry Key
+ required: true
+ default: null
+ aliases: []
+ value:
+ description:
+ - Name of Registry Value
+ required: true
+ default: null
+ aliases: []
+ data:
+ description:
+ - Registry Value Data
+ required: false
+ default: null
+ aliases: []
+ datatype:
+ description:
+ - Registry Value Data Type
+ required: false
+ choices:
+ - binary
+ - dword
+ - expandstring
+ - multistring
+ - string
+ - qword
+ default: string
+ aliases: []
+ state:
+ description:
+ - State of Registry Value
+ required: false
+ choices:
+ - present
+ - absent
+ default: present
+ aliases: []
+author: "Adam Keech (@smadam813), Josh Ludwig (@joshludwig)"
+'''
+
+EXAMPLES = '''
+ # Creates Registry Key called MyCompany.
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+
+ # Creates Registry Key called MyCompany,
+ # a value within MyCompany Key called "hello", and
+ # data for the value "hello" containing "world".
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ data: world
+
+ # Creates Registry Key called MyCompany,
+ # a value within MyCompany Key called "hello", and
+ # data for the value "hello" containing "1337" as type "dword".
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ data: 1337
+ datatype: dword
+
+ # Delete Registry Key MyCompany
+ # NOTE: Not specifying a value will delete the root key which means
+ # all values will be deleted
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ state: absent
+
+ # Delete Registry Value "hello" from MyCompany Key
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ state: absent
+'''
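As a hedged sketch, the same illustrative key and value from the snippets above wrapped into a named playbook task; datatype string is the documented default:

- name: Set the hello value under MyCompany
  win_regedit:
    key: HKCU:\Software\MyCompany
    value: hello
    data: world
    datatype: string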
diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1
new file mode 100644
index 00000000..2f802f59
--- /dev/null
+++ b/windows/win_scheduled_task.ps1
@@ -0,0 +1,74 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Peter Mounce <public@neverrunwithscissors.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+$ErrorActionPreference = "Stop"
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+if ($params.name)
+{
+ $name = $params.name
+}
+else
+{
+ Fail-Json $result "missing required argument: name"
+}
+if ($params.enabled)
+{
+ $enabled = $params.enabled | ConvertTo-Bool
+}
+else
+{
+ $enabled = $true
+}
+$target_state = @{$true = "Enabled"; $false="Disabled"}[$enabled]
+
+try
+{
+ $tasks = Get-ScheduledTask -TaskPath $name
+ $tasks_needing_changing = $tasks |? { $_.State -ne $target_state }
+ if (-not($tasks_needing_changing -eq $null))
+ {
+ if ($enabled)
+ {
+ $tasks_needing_changing | Enable-ScheduledTask
+ }
+ else
+ {
+ $tasks_needing_changing | Disable-ScheduledTask
+ }
+ Set-Attr $result "tasks_changed" ($tasks_needing_changing | foreach { $_.TaskPath + $_.TaskName })
+ $result.changed = $true
+ }
+ else
+ {
+ Set-Attr $result "tasks_changed" @()
+ $result.changed = $false
+ }
+
+ Exit-Json $result;
+}
+catch
+{
+ Fail-Json $result $_.Exception.Message
+}
diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py
new file mode 100644
index 00000000..2c586740
--- /dev/null
+++ b/windows/win_scheduled_task.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Peter Mounce <public@neverrunwithscissors.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_scheduled_task
+version_added: "2.0"
+short_description: Manage scheduled tasks
+description:
+ - Manage scheduled tasks
+options:
+ name:
+ description:
+ - Name of the scheduled task
+ - Supports * as wildcard
+ required: true
+ enabled:
+ description:
+      - Whether the task should be enabled or disabled
+ required: false
+ choices:
+ - yes
+ - no
+ default: yes
+author: Peter Mounce
+'''
+
+EXAMPLES = '''
+ # Disable the scheduled tasks with "WindowsUpdate" in their name
+ win_scheduled_task: name="*WindowsUpdate*" enabled=no
+'''
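A playbook-style sketch of the same options, hedged; per the documentation the name supports * as a wildcard and enabled defaults to yes, and the task name pattern is illustrative:

- name: Re-enable Windows Update scheduled tasks
  win_scheduled_task:
    name: '*WindowsUpdate*'
    enabled: yes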
diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1
new file mode 100644
index 00000000..a62f246f
--- /dev/null
+++ b/windows/win_unzip.ps1
@@ -0,0 +1,157 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Phil Schwartz <schwartzmx@gmail.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_unzip = New-Object psobject
+ changed = $false
+}
+
+If ($params.creates) {
+ If (Test-Path $params.creates) {
+ Exit-Json $result "The 'creates' file or directory already exists."
+ }
+
+}
+
+If ($params.src) {
+ $src = $params.src.toString()
+
+ If (-Not (Test-Path -path $src)){
+ Fail-Json $result "src file: $src does not exist."
+ }
+
+ $ext = [System.IO.Path]::GetExtension($src)
+}
+Else {
+ Fail-Json $result "missing required argument: src"
+}
+
+If (-Not($params.dest -eq $null)) {
+ $dest = $params.dest.toString()
+
+ If (-Not (Test-Path $dest -PathType Container)){
+ Try{
+ New-Item -itemtype directory -path $dest
+ }
+ Catch {
+ Fail-Json $result "Error creating $dest directory"
+ }
+ }
+}
+Else {
+ Fail-Json $result "missing required argument: dest"
+}
+
+If ($params.recurse) {
+ $recurse = ConvertTo-Bool ($params.recurse)
+}
+Else {
+ $recurse = $false
+}
+
+If ($params.rm) {
+ $rm = ConvertTo-Bool ($params.rm)
+}
+Else {
+ $rm = $false
+}
+
+If ($ext -eq ".zip" -And $recurse -eq $false) {
+ Try {
+ $shell = New-Object -ComObject Shell.Application
+ $shell.NameSpace($dest).copyhere(($shell.NameSpace($src)).items(), 20)
+ $result.changed = $true
+ }
+ Catch {
+ Fail-Json $result "Error unzipping $src to $dest"
+ }
+}
+# Requires PSCX
+Else {
+ # Check if PSCX is installed
+ $list = Get-Module -ListAvailable
+
+ If (-Not ($list -match "PSCX")) {
+ Fail-Json $result "PowerShellCommunityExtensions PowerShell Module (PSCX) is required for non-'.zip' compressed archive types."
+ }
+ Else {
+ Set-Attr $result.win_unzip "pscx_status" "present"
+ }
+
+ # Import
+ Try {
+ Import-Module PSCX
+ }
+ Catch {
+ Fail-Json $result "Error importing module PSCX"
+ }
+
+ Try {
+ If ($recurse) {
+ Expand-Archive -Path $src -OutputPath $dest -Force
+
+ If ($rm -eq $true) {
+ Get-ChildItem $dest -recurse | Where {$_.extension -eq ".gz" -Or $_.extension -eq ".zip" -Or $_.extension -eq ".bz2" -Or $_.extension -eq ".tar" -Or $_.extension -eq ".msu"} | % {
+ Expand-Archive $_.FullName -OutputPath $dest -Force
+ Remove-Item $_.FullName -Force
+ }
+ }
+ Else {
+ Get-ChildItem $dest -recurse | Where {$_.extension -eq ".gz" -Or $_.extension -eq ".zip" -Or $_.extension -eq ".bz2" -Or $_.extension -eq ".tar" -Or $_.extension -eq ".msu"} | % {
+ Expand-Archive $_.FullName -OutputPath $dest -Force
+ }
+ }
+ }
+ Else {
+ Expand-Archive -Path $src -OutputPath $dest -Force
+ }
+ }
+ Catch {
+ If ($recurse) {
+ Fail-Json $result "Error recursively expanding $src to $dest"
+ }
+ Else {
+ Fail-Json $result "Error expanding $src to $dest"
+ }
+ }
+}
+
+If ($rm -eq $true){
+ Remove-Item $src -Recurse -Force
+ Set-Attr $result.win_unzip "rm" "true"
+}
+
+# Fixes a spurious fail message ("ConvertTo-Json: The converted JSON string is in bad format") when the task actually succeeds.
+# This happens when JSON is parsing a string that ends with a "\", which is possible when specifying a directory to download to.
+# This catches that possible error, before assigning the JSON $result
+If ($src[$src.length-1] -eq "\") {
+ $src = $src.Substring(0, $src.length-1)
+}
+If ($dest[$dest.length-1] -eq "\") {
+ $dest = $dest.Substring(0, $dest.length-1)
+}
+Set-Attr $result.win_unzip "src" $src.toString()
+Set-Attr $result.win_unzip "dest" $dest.toString()
+Set-Attr $result.win_unzip "recurse" $recurse.toString()
+
+Exit-Json $result; \ No newline at end of file
diff --git a/windows/win_unzip.py b/windows/win_unzip.py
new file mode 100644
index 00000000..aa0180ba
--- /dev/null
+++ b/windows/win_unzip.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+DOCUMENTATION = '''
+---
+module: win_unzip
+version_added: "2.0"
+short_description: Unzips compressed files and archives on the Windows node
+description:
+ - Unzips compressed files and archives. For extracting any compression types other than .zip, the PowerShellCommunityExtensions (PSCX) Module is required. This module (in conjunction with PSCX) has the ability to recursively unzip files within the src zip file provided and also functionality for many other compression types. If the destination directory does not exist, it will be created before unzipping the file. Specifying rm parameter will force removal of the src file after extraction.
+options:
+ src:
+ description:
+ - File to be unzipped (provide absolute path)
+ required: true
+ dest:
+ description:
+ - Destination of zip file (provide absolute path of directory). If it does not exist, the directory will be created.
+ required: true
+ rm:
+ description:
+      - Remove the zip file after unzipping
+ required: no
+ choices:
+ - true
+ - false
+ - yes
+ - no
+ default: false
+ recurse:
+ description:
+ - Recursively expand zipped files within the src file.
+ required: no
+ default: false
+ choices:
+ - true
+ - false
+ - yes
+ - no
+ creates:
+ description:
+      - If this file or directory exists, the specified src will not be extracted.
+ required: no
+ default: null
+author: Phil Schwartz
+'''
+
+EXAMPLES = '''
+# This unzips a library that was downloaded with win_get_url, and removes the file after extraction
+$ ansible -i hosts -m win_unzip -a "src=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=true" all
+# Playbook example
+
+# Simple unzip
+---
+- name: Unzip a bz2 (BZip) file
+ win_unzip:
+    src: "C:\\Users\\Phil\\Logs.bz2"
+    dest: "C:\\Users\\Phil\\OldLogs"
+    creates: "C:\\Users\\Phil\\OldLogs"
+
+# This playbook example unzips a .zip file and recursively decompresses the contained .gz files and removes all unneeded compressed files after completion.
+---
+- name: Unzip ApplicationLogs.zip and decompress all GZipped log files
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Recursively decompress GZ files in ApplicationLogs.zip
+ win_unzip:
+ src: C:\Downloads\ApplicationLogs.zip
+ dest: C:\Application\Logs
+ recurse: yes
+ rm: true
+
+# Install PSCX to use for extracting a gz file
+ - name: Grab PSCX msi
+ win_get_url:
+ url: 'http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959'
+ dest: 'C:\\pscx.msi'
+ - name: Install PSCX
+ win_msi:
+ path: 'C:\\pscx.msi'
+ - name: Unzip gz log
+ win_unzip:
+ src: "C:\\Logs\\application-error-logs.gz"
+ dest: "C:\\ExtractedLogs\\application-error-logs"
+'''
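Finally, a hedged sketch of the plain .zip case, which per the description above needs no PSCX; the paths are illustrative:

- name: Unzip a plain zip archive
  win_unzip:
    src: 'C:\Downloads\archive.zip'
    dest: 'C:\Extracted\archive'
    creates: 'C:\Extracted\archive'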