author     Toshio Kuratomi <toshio@fedoraproject.org>   2015-11-30 19:02:28 -0800
committer  Toshio Kuratomi <toshio@fedoraproject.org>   2015-11-30 19:02:28 -0800
commit     cd9a7667aa39bbc1ccd606ebebaf3c62f228d601 (patch)
tree       36757b87c73aac6155559d9a318c233c78180819
parent     2dba0d8d3af177bec57e8d1ef493f48dc479612f (diff)
download   ansible-modules-core-cd9a7667aa39bbc1ccd606ebebaf3c62f228d601.tar.gz
Don't raise or catch StandardError in amazon modules
-rw-r--r--  cloud/amazon/ec2_asg.py             58
-rw-r--r--  cloud/amazon/ec2_elb.py              9
-rw-r--r--  cloud/amazon/ec2_elb_lb.py           5
-rw-r--r--  cloud/amazon/ec2_lc.py               2
-rw-r--r--  cloud/amazon/ec2_metric_alarm.py     9
-rw-r--r--  cloud/amazon/ec2_scaling_policy.py   5
-rw-r--r--  cloud/amazon/ec2_vol.py             73
-rw-r--r--  cloud/amazon/ec2_vpc_net.py         49
-rw-r--r--  cloud/amazon/rds_param_group.py      8
9 files changed, 104 insertions, 114 deletions
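
The change repeated across these nine modules is the same: StandardError is a Python 2-only base class (removed in Python 3), and catching it also swallowed unrelated programming errors, so the modules now catch the concrete exceptions that connect_to_aws can raise instead. A minimal sketch of the pattern, assuming boto 2.x plus the connect_to_aws, get_aws_connection_info and AnsibleAWSError helpers from ansible.module_utils.ec2 that these modules already import; open_connection is a made-up name for illustration, and `as e` is shown only because it works on both Python 2 and 3 (the commit itself keeps the Python 2 `except ..., e:` spelling):

    # Illustrative sketch only -- not a module from this commit.
    import boto.ec2
    import boto.exception
    from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
                                          get_aws_connection_info)

    def open_connection(module):
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        try:
            # Before: except (boto.exception.NoAuthHandlerFound, StandardError), e:
            return connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
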
diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py
index 39444c73..6564c4c2 100644
--- a/cloud/amazon/ec2_asg.py
+++ b/cloud/amazon/ec2_asg.py
@@ -152,9 +152,9 @@ EXAMPLES = '''
# Rolling ASG Updates
-Below is an example of how to assign a new launch config to an ASG and terminate old instances.
+Below is an example of how to assign a new launch config to an ASG and terminate old instances.
-All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
+All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".
This could also be considered a rolling deploy of a pre-baked AMI.
@@ -281,7 +281,6 @@ def get_properties(autoscaling_group):
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
-
return properties
def elb_dreg(asg_connection, module, group_name, instance_id):
@@ -298,7 +297,6 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
else:
return
- exists = True
for lb in as_group.load_balancers:
elb_connection.deregister_instances(lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
@@ -315,10 +313,8 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
time.sleep(10)
if wait_timeout <= time.time():
- # waiting took too long
+ # waiting took too long
module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
-
-
def elb_healthy(asg_connection, elb_connection, module, group_name):
@@ -337,7 +333,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
# but has not yet show up in the ELB
try:
lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
- except boto.exception.InvalidInstance, e:
+ except boto.exception.InvalidInstance:
pass
for i in lb_instances:
if i.state == "InService":
@@ -346,7 +342,6 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
return len(healthy_instances)
-
def wait_for_elb(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
wait_timeout = module.params.get('wait_timeout')
@@ -370,7 +365,7 @@ def wait_for_elb(asg_connection, module, group_name):
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
- # waiting took too long
+ # waiting took too long
module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
@@ -396,7 +391,7 @@ def create_autoscaling_group(connection, module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
@@ -433,7 +428,7 @@ def create_autoscaling_group(connection, module):
try:
connection.create_auto_scaling_group(ag)
- if wait_for_instances == True:
+ if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
@@ -475,7 +470,7 @@ def create_autoscaling_group(connection, module):
dead_tags = []
for tag in as_group.tags:
have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
- if not tag.key in want_tags:
+ if tag.key not in want_tags:
changed = True
dead_tags.append(tag)
@@ -492,14 +487,13 @@ def create_autoscaling_group(connection, module):
changed = True
as_group.load_balancers = module.params.get('load_balancers')
-
if changed:
try:
as_group.update()
except BotoServerError, e:
module.fail_json(msg=str(e))
- if wait_for_instances == True:
+ if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
try:
@@ -525,7 +519,7 @@ def delete_autoscaling_group(connection, module):
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
- instances = False
+ instances = False
time.sleep(10)
group.delete()
@@ -580,15 +574,15 @@ def replace(connection, module):
changed = True
return(changed, props)
- # we don't want to spin up extra instances if not necessary
+ # we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
- log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
- batch_size = num_new_inst_needed
+ log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
+ batch_size = num_new_inst_needed
if not old_instances:
changed = False
return(changed, props)
-
+
#check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group.min_size
@@ -637,7 +631,7 @@ def get_instances_by_lc(props, lc_check, initial_instances):
new_instances.append(i)
else:
old_instances.append(i)
-
+
else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
for i in props['instances']:
@@ -659,10 +653,10 @@ def list_purgeable_instances(props, lc_check, replace_instances, initial_instanc
# and they have a non-current launch config
if lc_check:
for i in instances:
- if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
+ if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
instances_to_terminate.append(i)
else:
- for i in instances:
+ for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate
@@ -676,7 +670,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
lc_check = module.params.get('lc_check')
decrement_capacity = False
break_loop = False
-
+
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
desired_size = as_group.min_size
@@ -720,7 +714,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
-
+
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
@@ -756,7 +750,7 @@ def wait_for_term_inst(connection, module, term_instances):
# waiting took too long
module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
-
+
def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
@@ -802,9 +796,9 @@ def main():
termination_policies=dict(type='list', default='Default')
),
)
-
+
module = AnsibleModule(
- argument_spec=argument_spec,
+ argument_spec=argument_spec,
mutually_exclusive = [['replace_all_instances', 'replace_instances']]
)
@@ -826,13 +820,13 @@ def main():
if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module)
elif state == 'absent':
- changed = delete_autoscaling_group(connection, module)
- module.exit_json( changed = changed )
+ changed = delete_autoscaling_group(connection, module)
+ module.exit_json( changed = changed )
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
if create_changed or replace_changed:
changed = True
module.exit_json( changed = changed, **asg_properties )
-
-main()
+if __name__ == '__main__':
+ main()
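
The other change visible in this and the following modules: the unconditional module-level main() call is replaced with an `if __name__ == '__main__':` guard, so importing the file (for example from a test harness) no longer executes it. A purely hypothetical illustration of the difference:

    # my_module.py -- hypothetical file, not part of this commit.
    def main():
        print("running module logic")

    # Before: a bare main() call here ran on import as well as on direct execution.
    # After: main() runs only for `python my_module.py`, not for `import my_module`.
    if __name__ == '__main__':
        main()
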
diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py
index 4e19a054..5b5569ce 100644
--- a/cloud/amazon/ec2_elb.py
+++ b/cloud/amazon/ec2_elb.py
@@ -257,7 +257,7 @@ class ElbManager:
try:
elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
self.module.fail_json(msg=str(e))
elbs = []
@@ -290,7 +290,7 @@ class ElbManager:
try:
asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
self.module.fail_json(msg=str(e))
asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
@@ -314,7 +314,7 @@ class ElbManager:
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
try:
ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
self.module.fail_json(msg=str(e))
return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
@@ -374,4 +374,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py
index 1d9b2db2..96ef6b22 100644
--- a/cloud/amazon/ec2_elb_lb.py
+++ b/cloud/amazon/ec2_elb_lb.py
@@ -492,7 +492,7 @@ class ElbManager(object):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
self.module.fail_json(msg=str(e))
def _delete_elb(self):
@@ -981,4 +981,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py
index 41b7effa..802b9d05 100644
--- a/cloud/amazon/ec2_lc.py
+++ b/cloud/amazon/ec2_lc.py
@@ -311,7 +311,7 @@ def main():
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
state = module.params.get('state')
diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py
index 94f30321..8ae7195f 100644
--- a/cloud/amazon/ec2_metric_alarm.py
+++ b/cloud/amazon/ec2_metric_alarm.py
@@ -115,8 +115,6 @@ EXAMPLES = '''
'''
-import sys
-
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
@@ -270,11 +268,11 @@ def main():
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
+
if region:
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
@@ -288,4 +286,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py
index 220fa325..3c412232 100644
--- a/cloud/amazon/ec2_scaling_policy.py
+++ b/cloud/amazon/ec2_scaling_policy.py
@@ -178,7 +178,7 @@ def main():
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg = str(e))
if state == 'present':
@@ -187,4 +187,5 @@ def main():
delete_scaling_policy(connection, module)
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py
index aba121d8..62e36a74 100644
--- a/cloud/amazon/ec2_vol.py
+++ b/cloud/amazon/ec2_vol.py
@@ -47,7 +47,7 @@ options:
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
- and continues to remain the Ansible default for backwards compatibility.
+ and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
version_added: "1.9"
@@ -69,7 +69,7 @@ options:
default: null
zone:
description:
- - zone in which to create the volume, if unset uses the zone the instance is in (if set)
+ - zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
@@ -87,7 +87,7 @@ options:
choices: ["yes", "no"]
version_added: "1.5"
state:
- description:
+ description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
@@ -101,15 +101,15 @@ extends_documentation_fragment:
EXAMPLES = '''
# Simple attachment action
-- ec2_vol:
- instance: XXXXXX
- volume_size: 5
+- ec2_vol:
+ instance: XXXXXX
+ volume_size: 5
device_name: sdd
-# Example using custom iops params
+# Example using custom iops params
- ec2_vol:
- instance: XXXXXX
- volume_size: 5
+ instance: XXXXXX
+ volume_size: 5
iops: 100
device_name: sdd
@@ -118,15 +118,15 @@ EXAMPLES = '''
instance: XXXXXX
snapshot: "{{ snapshot }}"
-# Playbook example combined with instance launch
+# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
- wait: yes
+ wait: yes
count: 3
register: ec2
- ec2_vol:
- instance: "{{ item.id }} "
+ instance: "{{ item.id }} "
volume_size: 5
with_items: ec2.instances
register: ec2_vol
@@ -223,7 +223,7 @@ def get_volume(module, ec2):
return vols[0]
def get_volumes(module, ec2):
-
+
instance = module.params.get('instance')
try:
@@ -254,12 +254,10 @@ def boto_supports_volume_encryption():
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
-
+
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
- id = module.params.get('id')
- instance = module.params.get('instance')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size')
@@ -292,16 +290,16 @@ def create_volume(module, ec2, zone):
def attach_volume(module, ec2, volume, instance):
-
+
device_name = module.params.get('device_name')
changed = False
-
+
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
-
+
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
-
+
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None:
try:
@@ -311,7 +309,7 @@ def attach_volume(module, ec2, volume, instance):
device_name = '/dev/xvdf'
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
-
+
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
@@ -330,9 +328,9 @@ def attach_volume(module, ec2, volume, instance):
return volume, changed
def detach_volume(module, ec2, volume):
-
+
changed = False
-
+
if volume.attachment_state() is not None:
adata = volume.attach_data
volume.detach()
@@ -340,15 +338,15 @@ def detach_volume(module, ec2, volume):
time.sleep(3)
volume.update()
changed = True
-
+
return volume, changed
-
+
def get_volume_info(volume, state):
-
+
# If we're just listing volumes then do nothing, else get the latest update for the volume
if state != 'list':
volume.update()
-
+
volume_info = {}
attachment = volume.attach_data
@@ -369,7 +367,7 @@ def get_volume_info(volume, state):
},
'tags': volume.tags
}
-
+
return volume_info
def main():
@@ -397,34 +395,32 @@ def main():
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
- volume_type = module.params.get('volume_type')
- iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
-
+
# Ensure we have the zone or can get the zone
if instance is None and zone is None and state == 'present':
module.fail_json(msg="You must specify either instance or zone")
-
+
# Set volume detach flag
if instance == 'None' or instance == '':
instance = None
detach_vol_flag = True
else:
detach_vol_flag = False
-
+
# Set changed flag
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
+
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
@@ -471,11 +467,11 @@ def main():
if volume_size and (id or snapshot):
module.fail_json(msg="Cannot specify volume_size together with id or snapshot")
-
+
if state == 'present':
volume, changed = create_volume(module, ec2, zone)
if detach_vol_flag:
- volume, changed = detach_volume(module, ec2, volume)
+ volume, changed = detach_volume(module, ec2, volume)
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)
@@ -489,4 +485,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py
index 51acbcaa..23ce175b 100644
--- a/cloud/amazon/ec2_vpc_net.py
+++ b/cloud/amazon/ec2_vpc_net.py
@@ -93,9 +93,6 @@ EXAMPLES = '''
'''
-import time
-import sys
-
try:
import boto
import boto.ec2
@@ -136,15 +133,15 @@ def vpc_exists(module, vpc, name, cidr_block, multi):
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
-
+
return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
-
+
if tags is None:
tags = dict()
-
+
tags.update({'Name': name})
try:
current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
@@ -156,10 +153,10 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name):
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
-
+
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
-
+
if vpc_obj.dhcp_options_id != dhcp_id:
connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
return True
@@ -211,48 +208,47 @@ def main():
tags=module.params.get('tags')
state=module.params.get('state')
multi=module.params.get('multi_ok')
-
+
changed=False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
+
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
-
+
if dns_hostnames and not dns_support:
module.fail_json('In order to enable DNS Hostnames you must also enable DNS support')
if state == 'present':
-
+
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
-
+
if vpc_obj is None:
try:
vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
-
- if dhcp_id is not None:
+
+ if dhcp_id is not None:
try:
if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
-
- if tags is not None or name is not None:
+
+ if tags is not None or name is not None:
try:
if update_vpc_tags(connection, module, vpc_obj, tags, name):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
-
# Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
# which is needed in order to detect the current status of DNS options. For now we just update
@@ -263,21 +259,21 @@ def main():
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
-
+
# get the vpc obj again in case it has changed
try:
vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
-
+
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
elif state == 'absent':
-
+
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
-
+
if vpc_obj is not None:
try:
connection.delete_vpc(vpc_obj.id)
@@ -287,11 +283,12 @@ def main():
e_msg = boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
-
+
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
-
+
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/rds_param_group.py b/cloud/amazon/rds_param_group.py
index b34e3090..fab333f0 100644
--- a/cloud/amazon/rds_param_group.py
+++ b/cloud/amazon/rds_param_group.py
@@ -112,7 +112,7 @@ except ImportError:
# returns a tuple: (whether or not a parameter was changed, the remaining parameters that weren't found in this parameter group)
-class NotModifiableError(StandardError):
+class NotModifiableError(Exception):
def __init__(self, error_message, *args):
super(NotModifiableError, self).__init__(error_message, *args)
self.error_message = error_message
@@ -175,7 +175,7 @@ def modify_group(group, params, immediate=False):
new_params = dict(params)
for key in new_params.keys():
- if group.has_key(key):
+ if key in group:
param = group[key]
new_value = new_params[key]
@@ -281,7 +281,6 @@ def main():
else:
break
-
except BotoServerError, e:
module.fail_json(msg = e.error_message)
@@ -297,4 +296,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
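
rds_param_group.py gets two further Python 3-proofing fixes alongside the same guard: the custom NotModifiableError now derives from Exception rather than the removed StandardError, and the dict.has_key() call becomes an `in` test. A compressed sketch of both, with a made-up parameter dict standing in for the real RDS parameter group:

    # Sketch of the two fixes; the dict below is illustrative, not RDS data.
    class NotModifiableError(Exception):   # was: class NotModifiableError(StandardError)
        def __init__(self, error_message, *args):
            super(NotModifiableError, self).__init__(error_message, *args)
            self.error_message = error_message

    group = {'max_connections': '150'}
    key = 'max_connections'
    # was: if group.has_key(key):  -- dict.has_key() no longer exists in Python 3
    if key in group:
        print(group[key])
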