-rw-r--r--  lib/ansible/modules/cloud/amazon/_ec2_ami_find.py | 52
-rw-r--r--  lib/ansible/modules/cloud/amazon/_ec2_ami_search.py | 14
-rw-r--r--  lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py | 76
-rw-r--r--  lib/ansible/modules/cloud/amazon/_ec2_vpc.py | 66
-rw-r--r--  lib/ansible/modules/cloud/amazon/aws_kms.py | 33
-rw-r--r--  lib/ansible/modules/cloud/amazon/cloudformation.py | 28
-rw-r--r--  lib/ansible/modules/cloud/amazon/cloudfront_facts.py | 56
-rw-r--r--  lib/ansible/modules/cloud/amazon/data_pipeline.py | 4
-rw-r--r--  lib/ansible/modules/cloud/amazon/dynamodb_table.py | 9
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_ami_copy.py | 5
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_elb.py | 6
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_elb_lb.py | 36
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_eni_facts.py | 10
-rwxr-xr-x  lib/ansible/modules/cloud/amazon/ec2_lc.py | 2
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_lc_facts.py | 16
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py | 49
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py | 24
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_snapshot.py | 30
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py | 4
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_tag.py | 14
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vol.py | 46
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vol_facts.py | 7
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py | 11
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py | 12
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_net.py | 54
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py | 2
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py | 23
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py | 18
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_win_password.py | 16
-rw-r--r--  lib/ansible/modules/cloud/amazon/ecs_cluster.py | 29
-rw-r--r--  lib/ansible/modules/cloud/amazon/ecs_service.py | 42
-rw-r--r--  lib/ansible/modules/cloud/amazon/ecs_service_facts.py | 15
-rw-r--r--  lib/ansible/modules/cloud/amazon/ecs_task.py | 33
-rw-r--r--  lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py | 5
-rw-r--r--  lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py | 27
-rw-r--r--  lib/ansible/modules/cloud/amazon/elb_application_lb.py | 1
-rw-r--r--  lib/ansible/modules/cloud/amazon/elb_classic_lb.py | 36
-rw-r--r--  lib/ansible/modules/cloud/amazon/elb_instance.py | 6
-rw-r--r--  lib/ansible/modules/cloud/amazon/execute_lambda.py | 32
-rw-r--r--  lib/ansible/modules/cloud/amazon/iam.py | 19
-rw-r--r--  lib/ansible/modules/cloud/amazon/iam_policy.py | 42
-rw-r--r--  lib/ansible/modules/cloud/amazon/rds_subnet_group.py | 28
-rw-r--r--  lib/ansible/modules/cloud/amazon/redshift.py | 94
-rw-r--r--  lib/ansible/modules/cloud/amazon/route53_health_check.py | 48
-rw-r--r--  lib/ansible/modules/cloud/amazon/s3_lifecycle.py | 37
-rw-r--r--  lib/ansible/modules/cloud/amazon/s3_logging.py | 8
-rw-r--r--  lib/ansible/modules/cloud/amazon/s3_sync.py | 1
-rw-r--r--  lib/ansible/modules/cloud/amazon/s3_website.py | 8
-rw-r--r--  lib/ansible/modules/cloud/amazon/sns_topic.py | 39
-rw-r--r--  lib/ansible/modules/cloud/amazon/sts_assume_role.py | 15
-rw-r--r--  lib/ansible/modules/cloud/amazon/sts_session_token.py | 8
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_deployment.py | 20
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_managed_disk.py | 1
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py | 13
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py | 2
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_facts.py | 1
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py | 6
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_subnet.py | 1
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py | 29
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py | 3
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py | 1
-rw-r--r--  lib/ansible/modules/cloud/cloudstack/cs_host.py | 2
-rw-r--r--  lib/ansible/modules/cloud/docker/docker_container.py | 7
-rw-r--r--  lib/ansible/modules/cloud/docker/docker_image.py | 2
-rw-r--r--  lib/ansible/modules/cloud/docker/docker_image_facts.py | 2
-rw-r--r--  lib/ansible/modules/cloud/docker/docker_network.py | 21
-rw-r--r--  lib/ansible/modules/cloud/google/gc_storage.py | 79
-rw-r--r--  lib/ansible/modules/cloud/google/gcdns_record.py | 190
-rw-r--r--  lib/ansible/modules/cloud/google/gcdns_zone.py | 73
-rw-r--r--  lib/ansible/modules/cloud/lxc/lxc_container.py | 10
-rw-r--r--  lib/ansible/modules/cloud/lxd/lxd_container.py | 7
-rw-r--r--  lib/ansible/modules/cloud/misc/rhevm.py | 139
-rw-r--r--  lib/ansible/modules/cloud/misc/serverless.py | 18
-rw-r--r--  lib/ansible/modules/cloud/misc/virt_net.py | 99
-rw-r--r--  lib/ansible/modules/cloud/misc/virt_pool.py | 130
-rw-r--r--  lib/ansible/modules/cloud/misc/xenserver_facts.py | 3
-rw-r--r--  lib/ansible/modules/cloud/openstack/os_image.py | 32
-rw-r--r--  lib/ansible/modules/cloud/openstack/os_keypair.py | 10
-rw-r--r--  lib/ansible/modules/cloud/openstack/os_nova_flavor.py | 30
-rw-r--r--  lib/ansible/modules/cloud/openstack/os_quota.py | 37
-rw-r--r--  lib/ansible/modules/cloud/openstack/os_security_group_rule.py | 32
-rw-r--r--  lib/ansible/modules/cloud/openstack/os_server.py | 51
-rw-r--r--  lib/ansible/modules/cloud/openstack/os_stack.py | 19
-rw-r--r--  lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py | 2
-rw-r--r--  lib/ansible/modules/cloud/packet/packet_sshkey.py | 28
-rw-r--r--  lib/ansible/modules/cloud/profitbricks/profitbricks.py | 16
-rw-r--r--  lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py | 8
-rw-r--r--  lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py | 8
-rw-r--r--  lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py | 4
-rw-r--r--  lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py | 9
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_cbs_attachments.py | 2
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_cdb.py | 2
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_clb.py | 2
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py | 6
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_dns.py | 2
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_dns_record.py | 2
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_facts.py | 2
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_keypair.py | 2
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py | 1
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_mon_check.py | 1
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_mon_entity.py | 1
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_mon_notification.py | 1
-rw-r--r--  lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py | 1
-rw-r--r--  lib/ansible/modules/cloud/univention/udm_dns_record.py | 38
-rw-r--r--  lib/ansible/modules/cloud/univention/udm_dns_zone.py | 102
-rw-r--r--  lib/ansible/modules/cloud/univention/udm_group.py | 44
-rw-r--r--  lib/ansible/modules/cloud/univention/udm_share.py | 302
-rw-r--r--  lib/ansible/modules/cloud/univention/udm_user.py | 262
-rw-r--r--  lib/ansible/modules/cloud/vmware/vmware_dns_config.py | 4
-rw-r--r--  lib/ansible/modules/cloud/vmware/vmware_dvs_host.py | 6
-rw-r--r--  lib/ansible/modules/cloud/vmware/vmware_guest.py | 3
-rw-r--r--  lib/ansible/modules/cloud/vmware/vmware_guest_find.py | 1
-rw-r--r--  lib/ansible/modules/cloud/vmware/vmware_vmkernel.py | 18
-rw-r--r--  lib/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py | 4
-rw-r--r--  lib/ansible/modules/cloud/vmware/vsphere_guest.py | 222
-rw-r--r--  lib/ansible/modules/cloud/webfaction/webfaction_app.py | 31
-rw-r--r--  lib/ansible/modules/cloud/webfaction/webfaction_db.py | 29
-rw-r--r--  lib/ansible/modules/cloud/webfaction/webfaction_domain.py | 22
-rw-r--r--  lib/ansible/modules/cloud/webfaction/webfaction_site.py | 36
-rw-r--r--  lib/ansible/modules/clustering/consul_acl.py | 5
-rw-r--r--  lib/ansible/modules/database/misc/kibana_plugin.py | 21
-rw-r--r--  lib/ansible/modules/database/misc/riak.py | 19
-rw-r--r--  lib/ansible/modules/database/mongodb/mongodb_parameter.py | 2
-rw-r--r--  lib/ansible/modules/database/mongodb/mongodb_user.py | 16
-rw-r--r--  lib/ansible/modules/database/mssql/mssql_db.py | 3
-rw-r--r--  lib/ansible/modules/database/postgresql/postgresql_ext.py | 17
-rw-r--r--  lib/ansible/modules/database/postgresql/postgresql_lang.py | 21
-rw-r--r--  lib/ansible/modules/database/postgresql/postgresql_schema.py | 20
-rw-r--r--  lib/ansible/modules/database/vertica/vertica_configuration.py | 11
-rw-r--r--  lib/ansible/modules/database/vertica/vertica_facts.py | 22
-rw-r--r--  lib/ansible/modules/database/vertica/vertica_role.py | 19
-rw-r--r--  lib/ansible/modules/database/vertica/vertica_schema.py | 21
-rw-r--r--  lib/ansible/modules/database/vertica/vertica_user.py | 31
-rw-r--r--  lib/ansible/modules/monitoring/bigpanda.py | 2
-rw-r--r--  lib/ansible/modules/monitoring/datadog_event.py | 2
-rw-r--r--  lib/ansible/modules/monitoring/icinga2_feature.py | 2
-rw-r--r--  lib/ansible/modules/monitoring/librato_annotation.py | 23
-rw-r--r--  lib/ansible/modules/monitoring/logentries.py | 16
-rw-r--r--  lib/ansible/modules/monitoring/logicmonitor.py | 49
-rw-r--r--  lib/ansible/modules/monitoring/logicmonitor_facts.py | 10
-rw-r--r--  lib/ansible/modules/monitoring/nagios.py | 22
-rw-r--r--  lib/ansible/modules/monitoring/newrelic_deployment.py | 5
-rw-r--r--  lib/ansible/modules/monitoring/pagerduty.py | 16
-rw-r--r--  lib/ansible/modules/monitoring/pingdom.py | 4
-rw-r--r--  lib/ansible/modules/monitoring/sensu_check.py | 48
-rw-r--r--  lib/ansible/modules/monitoring/sensu_subscription.py | 6
-rw-r--r--  lib/ansible/modules/monitoring/stackdriver.py | 4
-rw-r--r--  lib/ansible/modules/monitoring/uptimerobot.py | 8
-rw-r--r--  lib/ansible/modules/monitoring/zabbix/zabbix_proxy.py | 4
-rw-r--r--  lib/ansible/modules/net_tools/cloudflare_dns.py | 198
-rw-r--r--  lib/ansible/modules/net_tools/dnsimple.py | 34
-rw-r--r--  lib/ansible/modules/net_tools/dnsmadeeasy.py | 5
-rw-r--r--  lib/ansible/modules/net_tools/ipinfoio_facts.py | 4
-rw-r--r--  lib/ansible/modules/net_tools/nmcli.py | 355
-rw-r--r--  lib/ansible/modules/net_tools/omapi_host.py | 2
-rw-r--r--  lib/ansible/modules/net_tools/snmp_facts.py | 45
-rw-r--r--  lib/ansible/modules/network/a10/a10_server_axapi3.py | 7
-rw-r--r--  lib/ansible/modules/network/a10/a10_virtual_server.py | 3
-rw-r--r--  lib/ansible/modules/network/aos/aos_asn_pool.py | 66
-rw-r--r--  lib/ansible/modules/network/aos/aos_blueprint.py | 15
-rw-r--r--  lib/ansible/modules/network/aos/aos_blueprint_param.py | 20
-rw-r--r--  lib/ansible/modules/network/aos/aos_blueprint_virtnet.py | 22
-rw-r--r--  lib/ansible/modules/network/aos/aos_device.py | 18
-rw-r--r--  lib/ansible/modules/network/aos/aos_external_router.py | 57
-rw-r--r--  lib/ansible/modules/network/aos/aos_ip_pool.py | 54
-rw-r--r--  lib/ansible/modules/network/aos/aos_logical_device.py | 36
-rw-r--r--  lib/ansible/modules/network/aos/aos_logical_device_map.py | 40
-rw-r--r--  lib/ansible/modules/network/aos/aos_rack_type.py | 40
-rw-r--r--  lib/ansible/modules/network/aos/aos_template.py | 38
-rw-r--r--  lib/ansible/modules/network/asa/asa_acl.py | 2
-rw-r--r--  lib/ansible/modules/network/asa/asa_command.py | 1
-rw-r--r--  lib/ansible/modules/network/asa/asa_config.py | 4
-rwxr-xr-x  lib/ansible/modules/network/bigswitch/bigmon_chain.py | 5
-rw-r--r--  lib/ansible/modules/network/bigswitch/bigmon_policy.py | 23
-rw-r--r--  lib/ansible/modules/network/cnos/cnos_bgp.py | 2
-rw-r--r--  lib/ansible/modules/network/cnos/cnos_command.py | 2
-rw-r--r--  lib/ansible/modules/network/cnos/cnos_conditional_command.py | 2
-rw-r--r--  lib/ansible/modules/network/cnos/cnos_image.py | 2
-rw-r--r--  lib/ansible/modules/network/cnos/cnos_interface.py | 2
-rw-r--r--  lib/ansible/modules/network/cnos/cnos_portchannel.py | 2
-rw-r--r--  lib/ansible/modules/network/cnos/cnos_rollback.py | 2
-rw-r--r--  lib/ansible/modules/network/cnos/cnos_vlan.py | 2
-rw-r--r--  lib/ansible/modules/network/cumulus/_cl_bond.py | 1
-rw-r--r--  lib/ansible/modules/network/cumulus/_cl_img_install.py | 3
-rw-r--r--  lib/ansible/modules/network/cumulus/_cl_license.py | 2
-rw-r--r--  lib/ansible/modules/network/cumulus/_cl_ports.py | 5
-rw-r--r--  lib/ansible/modules/network/cumulus/nclu.py | 18
-rw-r--r--  lib/ansible/modules/network/eos/eos_banner.py | 3
-rw-r--r--  lib/ansible/modules/network/eos/eos_command.py | 4
-rw-r--r--  lib/ansible/modules/network/eos/eos_config.py | 1
-rw-r--r--  lib/ansible/modules/network/eos/eos_eapi.py | 14
-rw-r--r--  lib/ansible/modules/network/eos/eos_facts.py | 4
-rw-r--r--  lib/ansible/modules/network/eos/eos_system.py | 17
-rw-r--r--  lib/ansible/modules/network/eos/eos_user.py | 5
-rw-r--r--  lib/ansible/modules/network/f5/bigip_monitor_tcp.py | 6
-rw-r--r--  lib/ansible/modules/network/f5/bigip_ucs.py | 1
-rw-r--r--  lib/ansible/modules/network/f5/bigip_wait.py | 1
-rw-r--r--  lib/ansible/modules/network/fortios/fortios_config.py | 41
-rw-r--r--  lib/ansible/modules/network/fortios/fortios_ipv4_policy.py | 46
-rw-r--r--  lib/ansible/modules/network/illumos/dladm_iptun.py | 1
-rw-r--r--  lib/ansible/modules/network/illumos/dladm_linkprop.py | 1
-rw-r--r--  lib/ansible/modules/network/ios/ios_banner.py | 4
-rw-r--r--  lib/ansible/modules/network/ios/ios_command.py | 4
-rw-r--r--  lib/ansible/modules/network/ios/ios_facts.py | 3
-rw-r--r--  lib/ansible/modules/network/ios/ios_system.py | 15
-rw-r--r--  lib/ansible/modules/network/ios/ios_vrf.py | 9
-rw-r--r--  lib/ansible/modules/network/netscaler/netscaler_lb_vserver.py | 4
-rw-r--r--  lib/ansible/modules/network/netvisor/pn_cluster.py | 4
-rw-r--r--  lib/ansible/modules/network/netvisor/pn_ospfarea.py | 4
-rw-r--r--  lib/ansible/modules/network/netvisor/pn_vlag.py | 6
-rw-r--r--  lib/ansible/modules/network/netvisor/pn_vlan.py | 6
-rw-r--r--  lib/ansible/modules/network/netvisor/pn_vrouter.py | 4
-rw-r--r--  lib/ansible/modules/network/netvisor/pn_vrouterbgp.py | 2
-rw-r--r--  lib/ansible/modules/network/netvisor/pn_vrouterif.py | 4
-rw-r--r--  lib/ansible/modules/network/netvisor/pn_vrouterlbif.py | 4
-rw-r--r--  lib/ansible/modules/network/nxos/_nxos_mtu.py | 1
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_aaa_server_host.py | 4
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_config.py | 7
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_gir.py | 42
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_gir_profile_management.py | 5
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_igmp.py | 4
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_igmp_interface.py | 12
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_igmp_snooping.py | 4
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_ip_interface.py | 14
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_logging.py | 2
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_ntp_auth.py | 5
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_ntp_options.py | 3
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_nxapi.py | 12
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_overlay_global.py | 6
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_ping.py | 6
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_smu.py | 3
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_snapshot.py | 3
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_static_route.py | 2
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_system.py | 18
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_udld.py | 6
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_udld_interface.py | 16
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_user.py | 19
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_vrf.py | 4
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_vtp_domain.py | 2
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_vtp_password.py | 4
-rw-r--r--  lib/ansible/modules/network/nxos/nxos_vtp_version.py | 2
-rw-r--r--  lib/ansible/modules/network/ordnance/ordnance_config.py | 9
-rw-r--r--  lib/ansible/modules/network/ordnance/ordnance_facts.py | 1
-rw-r--r--  lib/ansible/modules/network/ovs/openvswitch_bridge.py | 8
-rw-r--r--  lib/ansible/modules/network/ovs/openvswitch_port.py | 11
-rw-r--r--  lib/ansible/modules/network/panos/panos_admin.py | 2
-rw-r--r--  lib/ansible/modules/network/panos/panos_admpwd.py | 10
-rw-r--r--  lib/ansible/modules/network/panos/panos_cert_gen_ssh.py | 12
-rw-r--r--  lib/ansible/modules/network/panos/panos_check.py | 4
-rw-r--r--  lib/ansible/modules/network/panos/panos_dag.py | 2
-rw-r--r--  lib/ansible/modules/network/panos/panos_import.py | 4
-rw-r--r--  lib/ansible/modules/network/panos/panos_interface.py | 10
-rw-r--r--  lib/ansible/modules/network/panos/panos_loadcfg.py | 2
-rw-r--r--  lib/ansible/modules/network/panos/panos_mgtconfig.py | 10
-rw-r--r--  lib/ansible/modules/network/panos/panos_pg.py | 2
-rw-r--r--  lib/ansible/modules/network/sros/sros_command.py | 3
-rw-r--r--  lib/ansible/modules/network/sros/sros_config.py | 1
-rw-r--r--  lib/ansible/modules/network/sros/sros_rollback.py | 9
-rw-r--r--  lib/ansible/modules/network/vyos/vyos_command.py | 1
-rw-r--r--  lib/ansible/modules/network/vyos/vyos_system.py | 1
-rw-r--r--  lib/ansible/modules/notification/campfire.py | 8
-rw-r--r--  lib/ansible/modules/notification/flowdock.py | 8
-rw-r--r--  lib/ansible/modules/notification/grove.py | 16
-rw-r--r--  lib/ansible/modules/notification/hall.py | 4
-rw-r--r--  lib/ansible/modules/notification/irc.py | 10
-rw-r--r--  lib/ansible/modules/notification/jabber.py | 12
-rw-r--r--  lib/ansible/modules/notification/mattermost.py | 46
-rw-r--r--  lib/ansible/modules/notification/mqtt.py | 54
-rw-r--r--  lib/ansible/modules/notification/osx_say.py | 4
-rw-r--r--  lib/ansible/modules/notification/pushbullet.py | 32
-rw-r--r--  lib/ansible/modules/notification/pushover.py | 10
-rw-r--r--  lib/ansible/modules/notification/rocketchat.py | 30
-rw-r--r--  lib/ansible/modules/notification/sendgrid.py | 25
-rw-r--r--  lib/ansible/modules/notification/slack.py | 34
-rw-r--r--  lib/ansible/modules/notification/twilio.py | 10
-rw-r--r--  lib/ansible/modules/packaging/language/bundler.py | 6
-rw-r--r--  lib/ansible/modules/packaging/language/cpanm.py | 21
-rw-r--r--  lib/ansible/modules/packaging/language/gem.py | 50
-rw-r--r--  lib/ansible/modules/packaging/language/maven_artifact.py | 37
-rw-r--r--  lib/ansible/modules/packaging/language/pear.py | 19
-rw-r--r--  lib/ansible/modules/packaging/os/apk.py | 10
-rw-r--r--  lib/ansible/modules/packaging/os/dpkg_selections.py | 1
-rw-r--r--  lib/ansible/modules/packaging/os/homebrew.py | 6
-rw-r--r--  lib/ansible/modules/packaging/os/homebrew_cask.py | 8
-rw-r--r--  lib/ansible/modules/packaging/os/layman.py | 16
-rw-r--r--  lib/ansible/modules/packaging/os/macports.py | 9
-rw-r--r--  lib/ansible/modules/packaging/os/opkg.py | 1
-rw-r--r--  lib/ansible/modules/packaging/os/pkgin.py | 39
-rw-r--r--  lib/ansible/modules/packaging/os/pkgng.py | 53
-rw-r--r--  lib/ansible/modules/packaging/os/pkgutil.py | 37
-rw-r--r--  lib/ansible/modules/packaging/os/portage.py | 4
-rw-r--r--  lib/ansible/modules/packaging/os/portinstall.py | 14
-rw-r--r--  lib/ansible/modules/packaging/os/slackpkg.py | 6
-rw-r--r--  lib/ansible/modules/packaging/os/sorcery.py | 20
-rw-r--r--  lib/ansible/modules/packaging/os/svr4pkg.py | 38
-rw-r--r--  lib/ansible/modules/packaging/os/swdepot.py | 24
-rw-r--r--  lib/ansible/modules/packaging/os/zypper.py | 26
-rw-r--r--  lib/ansible/modules/packaging/os/zypper_repository.py | 18
-rw-r--r--  lib/ansible/modules/storage/infinidat/infini_export.py | 14
-rw-r--r--  lib/ansible/modules/storage/infinidat/infini_export_client.py | 22
-rw-r--r--  lib/ansible/modules/storage/infinidat/infini_fs.py | 14
-rw-r--r--  lib/ansible/modules/storage/infinidat/infini_host.py | 14
-rw-r--r--  lib/ansible/modules/storage/infinidat/infini_pool.py | 26
-rw-r--r--  lib/ansible/modules/storage/infinidat/infini_vol.py | 12
-rw-r--r--  lib/ansible/modules/storage/netapp/na_cdot_qtree.py | 2
-rw-r--r--  lib/ansible/modules/storage/netapp/na_cdot_volume.py | 2
-rw-r--r--  lib/ansible/modules/storage/netapp/netapp_e_amg.py | 2
-rw-r--r--  lib/ansible/modules/storage/netapp/netapp_e_storagepool.py | 2
-rw-r--r--  lib/ansible/modules/storage/netapp/sf_snapshot_schedule_manager.py | 4
-rw-r--r--  lib/ansible/modules/storage/netapp/sf_volume_access_group_manager.py | 2
-rw-r--r--  lib/ansible/modules/storage/netapp/sf_volume_manager.py | 6
-rw-r--r--  lib/ansible/modules/system/cron.py | 1
-rw-r--r--  lib/ansible/modules/web_infrastructure/letsencrypt.py | 3
-rw-r--r--  test/sanity/pep8/legacy-files.txt | 253
314 files changed, 3464 insertions, 3385 deletions
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_ami_find.py b/lib/ansible/modules/cloud/amazon/_ec2_ami_find.py
index fa67add809..324405647a 100644
--- a/lib/ansible/modules/cloud/amazon/_ec2_ami_find.py
+++ b/lib/ansible/modules/cloud/amazon/_ec2_ami_find.py
@@ -303,7 +303,7 @@ def get_block_device_mapping(image):
"""
bdm_dict = dict()
- bdm = getattr(image,'block_device_mapping')
+ bdm = getattr(image, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'size': bdm[device_name].size,
@@ -319,28 +319,28 @@ def get_block_device_mapping(image):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- owner = dict(required=False, default=None),
- ami_id = dict(required=False),
- ami_tags = dict(required=False, type='dict',
- aliases = ['search_tags', 'image_tags']),
- architecture = dict(required=False),
- hypervisor = dict(required=False),
- is_public = dict(required=False, type='bool'),
- name = dict(required=False),
- platform = dict(required=False),
- product_code = dict(required=False),
- sort = dict(required=False, default=None,
- choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location',
- 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
- sort_tag = dict(required=False),
- sort_order = dict(required=False, default='ascending',
- choices=['ascending', 'descending']),
- sort_start = dict(required=False),
- sort_end = dict(required=False),
- state = dict(required=False, default='available'),
- virtualization_type = dict(required=False),
- no_result_action = dict(required=False, default='success',
- choices = ['success', 'fail']),
+ owner=dict(required=False, default=None),
+ ami_id=dict(required=False),
+ ami_tags=dict(required=False, type='dict',
+ aliases=['search_tags', 'image_tags']),
+ architecture=dict(required=False),
+ hypervisor=dict(required=False),
+ is_public=dict(required=False, type='bool'),
+ name=dict(required=False),
+ platform=dict(required=False),
+ product_code=dict(required=False),
+ sort=dict(required=False, default=None,
+ choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location',
+ 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
+ sort_tag=dict(required=False),
+ sort_order=dict(required=False, default='ascending',
+ choices=['ascending', 'descending']),
+ sort_start=dict(required=False),
+ sort_end=dict(required=False),
+ state=dict(required=False, default='available'),
+ virtualization_type=dict(required=False),
+ no_result_action=dict(required=False, default='success',
+ choices=['success', 'fail']),
)
)
@@ -379,7 +379,7 @@ def main():
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
- filter['tag:'+tag] = ami_tags[tag]
+ filter['tag:' + tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
@@ -435,9 +435,9 @@ def main():
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
- results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
+ results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order == 'descending'))
elif sort:
- results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+ results.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
try:
if sort and sort_start and sort_end:
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_ami_search.py b/lib/ansible/modules/cloud/amazon/_ec2_ami_search.py
index 3235b1b84c..d270e159c3 100644
--- a/lib/ansible/modules/cloud/amazon/_ec2_ami_search.py
+++ b/lib/ansible/modules/cloud/amazon/_ec2_ami_search.py
@@ -136,9 +136,9 @@ def ubuntu(module):
reader = csv.reader(req, delimiter='\t')
try:
ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream,
- store, arch, region, virt)
+ store, arch, region, virt)
module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag,
- serial=serial)
+ serial=serial)
except KeyError:
module.fail_json(msg="No matching AMI found")
@@ -163,7 +163,7 @@ def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt):
actual_store, actual_arch, actual_region, ami, aki, ari,
actual_virt) = row
actual = (actual_release, actual_stream, actual_store, actual_arch,
- actual_region, actual_virt)
+ actual_region, actual_virt)
if actual == expected:
# aki and ari are sometimes blank
if aki == '':
@@ -185,14 +185,14 @@ def main():
distro=dict(required=True, choices=SUPPORTED_DISTROS),
release=dict(required=True),
stream=dict(required=False, default='server',
- choices=['desktop', 'server']),
+ choices=['desktop', 'server']),
store=dict(required=False, default='ebs',
- choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']),
+ choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']),
arch=dict(required=False, default='amd64',
- choices=['i386', 'amd64']),
+ choices=['i386', 'amd64']),
region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
virt=dict(required=False, default='paravirtual',
- choices=['paravirtual', 'hvm']),
+ choices=['paravirtual', 'hvm']),
)
module = AnsibleModule(argument_spec=arg_spec)
distro = module.params['distro']
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py
index f07cbe88cc..57e342e3d7 100644
--- a/lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py
+++ b/lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py
@@ -74,12 +74,12 @@ def get_instance_info(instance):
# Get groups
groups = []
for group in instance.groups:
- groups.append({ 'id': group.id, 'name': group.name }.copy())
+ groups.append({'id': group.id, 'name': group.name}.copy())
# Get interfaces
interfaces = []
for interface in instance.interfaces:
- interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy())
+ interfaces.append({'id': interface.id, 'mac_address': interface.mac_address}.copy())
# If an instance is terminated, sourceDestCheck is no longer returned
try:
@@ -104,41 +104,41 @@ def get_instance_info(instance):
instance_profile = dict(instance.instance_profile) if instance.instance_profile is not None else None
- instance_info = { 'id': instance.id,
- 'kernel': instance.kernel,
- 'instance_profile': instance_profile,
- 'root_device_type': instance.root_device_type,
- 'private_dns_name': instance.private_dns_name,
- 'public_dns_name': instance.public_dns_name,
- 'ebs_optimized': instance.ebs_optimized,
- 'client_token': instance.client_token,
- 'virtualization_type': instance.virtualization_type,
- 'architecture': instance.architecture,
- 'ramdisk': instance.ramdisk,
- 'tags': instance.tags,
- 'key_name': instance.key_name,
- 'source_destination_check': source_dest_check,
- 'image_id': instance.image_id,
- 'groups': groups,
- 'interfaces': interfaces,
- 'spot_instance_request_id': instance.spot_instance_request_id,
- 'requester_id': instance.requester_id,
- 'monitoring_state': instance.monitoring_state,
- 'placement': {
- 'tenancy': instance._placement.tenancy,
- 'zone': instance._placement.zone
- },
- 'ami_launch_index': instance.ami_launch_index,
- 'launch_time': instance.launch_time,
- 'hypervisor': instance.hypervisor,
- 'region': instance.region.name,
- 'persistent': instance.persistent,
- 'private_ip_address': instance.private_ip_address,
- 'public_ip_address': instance.ip_address,
- 'state': instance._state.name,
- 'vpc_id': instance.vpc_id,
- 'block_device_mapping': bdm_dict,
- }
+ instance_info = {'id': instance.id,
+ 'kernel': instance.kernel,
+ 'instance_profile': instance_profile,
+ 'root_device_type': instance.root_device_type,
+ 'private_dns_name': instance.private_dns_name,
+ 'public_dns_name': instance.public_dns_name,
+ 'ebs_optimized': instance.ebs_optimized,
+ 'client_token': instance.client_token,
+ 'virtualization_type': instance.virtualization_type,
+ 'architecture': instance.architecture,
+ 'ramdisk': instance.ramdisk,
+ 'tags': instance.tags,
+ 'key_name': instance.key_name,
+ 'source_destination_check': source_dest_check,
+ 'image_id': instance.image_id,
+ 'groups': groups,
+ 'interfaces': interfaces,
+ 'spot_instance_request_id': instance.spot_instance_request_id,
+ 'requester_id': instance.requester_id,
+ 'monitoring_state': instance.monitoring_state,
+ 'placement': {
+ 'tenancy': instance._placement.tenancy,
+ 'zone': instance._placement.zone
+ },
+ 'ami_launch_index': instance.ami_launch_index,
+ 'launch_time': instance.launch_time,
+ 'hypervisor': instance.hypervisor,
+ 'region': instance.region.name,
+ 'persistent': instance.persistent,
+ 'private_ip_address': instance.private_ip_address,
+ 'public_ip_address': instance.ip_address,
+ 'state': instance._state.name,
+ 'vpc_id': instance.vpc_id,
+ 'block_device_mapping': bdm_dict,
+ }
return instance_info
@@ -163,7 +163,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- filters = dict(default=None, type='dict')
+ filters=dict(default=None, type='dict')
)
)
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc.py
index ab6f3a71d1..2b1b88862e 100644
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc.py
+++ b/lib/ansible/modules/cloud/amazon/_ec2_vpc.py
@@ -189,6 +189,7 @@ def get_vpc_info(vpc):
'state': vpc.state,
})
+
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Finds a VPC that matches a specific id or cidr + tags
@@ -211,7 +212,7 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
# Check for existing VPC by cidr_block or id
if vpc_id is not None:
- found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',})
+ found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available', })
else:
previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
@@ -234,8 +235,8 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
return (found_vpc)
-def routes_match(rt_list=None, rt=None, igw=None):
+def routes_match(rt_list=None, rt=None, igw=None):
"""
Check if the route table has all routes as in given list
@@ -284,6 +285,7 @@ def routes_match(rt_list=None, rt=None, igw=None):
else:
return True
+
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
"""
Checks if the remote routes match the local routes.
@@ -299,7 +301,7 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
False when both routes and subnet associations matched.
"""
- #We add a one for the main table
+ # We add a one for the main table
rtb_len = len(route_tables) + 1
remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
if remote_rtb_len != rtb_len:
@@ -307,13 +309,13 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
for rt in route_tables:
rt_id = None
for sn in rt['subnets']:
- rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
+ rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
if len(rsn) != 1:
module.fail_json(
- msg='The subnet {0} to associate with route_table {1} ' \
+ msg='The subnet {0} to associate with route_table {1} '
'does not exist, aborting'.format(sn, rt)
)
- nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
+ nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
if not nrt:
return True
else:
@@ -388,10 +390,10 @@ def create_vpc(module, vpc_conn):
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
- module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
+ module.fail_json(msg="wait for vpc availability timeout on %s" % time.asctime())
except boto.exception.BotoServerError as e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# Done with base VPC, now change to attributes and features.
@@ -408,7 +410,6 @@ def create_vpc(module, vpc_conn):
if new_tags:
vpc_conn.create_tags(vpc.id, new_tags)
-
# boto doesn't appear to have a way to determine the existing
# value of the dns attributes, so we just set them.
# It also must be done one at a time.
@@ -420,7 +421,7 @@ def create_vpc(module, vpc_conn):
if not isinstance(subnets, list):
module.fail_json(msg='subnets needs to be a list of cidr blocks')
- current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
+ current_subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
# First add all new subnets
for subnet in subnets:
@@ -468,7 +469,7 @@ def create_vpc(module, vpc_conn):
# to create tags results in exception.
# boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
# so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
- while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
+ while len(vpc_conn.get_all_subnets(filters={'subnet-id': new_subnet.id})) == 0:
time.sleep(0.1)
vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
@@ -548,7 +549,7 @@ def create_vpc(module, vpc_conn):
if route['gw'] == 'igw':
if not internet_gateway:
module.fail_json(
- msg='You asked for an Internet Gateway ' \
+ msg='You asked for an Internet Gateway '
'(igw) route, but you have no Internet Gateway'
)
route_kwargs['gateway_id'] = igw.id
@@ -564,10 +565,10 @@ def create_vpc(module, vpc_conn):
# Associate with subnets
for sn in rt['subnets']:
- rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
+ rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
if len(rsn) != 1:
module.fail_json(
- msg='The subnet {0} to associate with route_table {1} ' \
+ msg='The subnet {0} to associate with route_table {1} '
'does not exist, aborting'.format(sn, rt)
)
rsn = rsn[0]
@@ -576,7 +577,7 @@ def create_vpc(module, vpc_conn):
old_rt = vpc_conn.get_all_route_tables(
filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
)
- old_rt = [ x for x in old_rt if x.id is not None ]
+ old_rt = [x for x in old_rt if x.id is not None]
if len(old_rt) == 1:
old_rt = old_rt[0]
association_id = None
@@ -591,7 +592,7 @@ def create_vpc(module, vpc_conn):
changed = True
except EC2ResponseError as e:
module.fail_json(
- msg='Unable to create and associate route table {0}, error: ' \
+ msg='Unable to create and associate route table {0}, error: '
'{1}'.format(rt, e)
)
@@ -625,7 +626,7 @@ def create_vpc(module, vpc_conn):
created_vpc_id = vpc.id
returned_subnets = []
- current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
+ current_subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
for sn in current_subnets:
returned_subnets.append({
@@ -647,6 +648,7 @@ def create_vpc(module, vpc_conn):
return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
+
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Terminates a VPC
@@ -671,8 +673,8 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
if vpc is not None:
if vpc.state == 'available':
- terminated_vpc_id=vpc.id
- vpc_dict=get_vpc_info(vpc)
+ terminated_vpc_id = vpc.id
+ vpc_dict = get_vpc_info(vpc)
try:
subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
for sn in subnets:
@@ -709,18 +711,18 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- cidr_block = dict(),
- instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
- wait = dict(type='bool', default=False),
- wait_timeout = dict(default=300),
- dns_support = dict(type='bool', default=True),
- dns_hostnames = dict(type='bool', default=True),
- subnets = dict(type='list'),
- vpc_id = dict(),
- internet_gateway = dict(type='bool', default=False),
- resource_tags = dict(type='dict', required=True),
- route_tables = dict(type='list'),
- state = dict(choices=['present', 'absent'], default='present'),
+ cidr_block=dict(),
+ instance_tenancy=dict(choices=['default', 'dedicated'], default='default'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(default=300),
+ dns_support=dict(type='bool', default=True),
+ dns_hostnames=dict(type='bool', default=True),
+ subnets=dict(type='list'),
+ vpc_id=dict(),
+ internet_gateway=dict(type='bool', default=False),
+ resource_tags=dict(type='dict', required=True),
+ route_tables=dict(type='list'),
+ state=dict(choices=['present', 'absent'], default='present'),
)
)
@@ -740,7 +742,7 @@ def main():
try:
vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg = str(e))
+ module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
diff --git a/lib/ansible/modules/cloud/amazon/aws_kms.py b/lib/ansible/modules/cloud/amazon/aws_kms.py
index 1c6ebd21af..27750f60d7 100644
--- a/lib/ansible/modules/cloud/amazon/aws_kms.py
+++ b/lib/ansible/modules/cloud/amazon/aws_kms.py
@@ -120,6 +120,7 @@ try:
except ImportError:
HAS_BOTO3 = False
+
def get_arn_from_kms_alias(kms, aliasname):
ret = kms.list_aliases()
key_id = None
@@ -138,12 +139,14 @@ def get_arn_from_kms_alias(kms, aliasname):
return k['KeyArn']
raise Exception('could not find key from id: {}'.format(key_id))
+
def get_arn_from_role_name(iam, rolename):
ret = iam.get_role(RoleName=rolename)
if ret.get('Role') and ret['Role'].get('Arn'):
return ret['Role']['Arn']
raise Exception('could not find arn for name {}.'.format(rolename))
+
def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clean_invalid_entries=True):
ret = {}
keyret = kms.get_key_policy(KeyId=keyarn, PolicyName='default')
@@ -179,10 +182,10 @@ def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clean_invalid_entries=True):
statement['Principal']['AWS'] = valid_entries
had_invalid_entries = True
- if not role_arn in statement['Principal']['AWS']: # needs to be added.
+ if not role_arn in statement['Principal']['AWS']: # needs to be added.
changes_needed[granttype] = 'add'
statement['Principal']['AWS'].append(role_arn)
- elif role_arn in statement['Principal']['AWS']: # not one the places the role should be
+ elif role_arn in statement['Principal']['AWS']: # not one the places the role should be
changes_needed[granttype] = 'remove'
statement['Principal']['AWS'].remove(role_arn)
@@ -210,6 +213,7 @@ def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clean_invalid_entries=True):
return ret
+
def assert_policy_shape(policy):
'''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.'''
errors = []
@@ -218,7 +222,7 @@ def assert_policy_shape(policy):
found_statement_type = {}
for statement in policy['Statement']:
- for label,sidlabel in statement_label.items():
+ for label, sidlabel in statement_label.items():
if statement['Sid'] == sidlabel:
found_statement_type[label] = True
@@ -230,16 +234,17 @@ def assert_policy_shape(policy):
raise Exception('Problems asserting policy shape. Cowardly refusing to modify it: {}'.format(' '.join(errors)))
return None
+
def main():
argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
argument_spec.update(dict(
- mode = dict(choices=['grant', 'deny'], default='grant'),
- key_alias = dict(required=False, type='str'),
- key_arn = dict(required=False, type='str'),
- role_name = dict(required=False, type='str'),
- role_arn = dict(required=False, type='str'),
- grant_types = dict(required=False, type='list'),
- clean_invalid_entries = dict(type='bool', default=True),
+ mode=dict(choices=['grant', 'deny'], default='grant'),
+ key_alias=dict(required=False, type='str'),
+ key_arn=dict(required=False, type='str'),
+ role_name=dict(required=False, type='str'),
+ role_arn=dict(required=False, type='str'),
+ grant_types=dict(required=False, type='list'),
+ clean_invalid_entries=dict(type='bool', default=True),
)
)
@@ -255,7 +260,6 @@ def main():
result = {}
mode = module.params['mode']
-
try:
region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
kms = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_kwargs)
@@ -263,7 +267,6 @@ def main():
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc())
-
try:
if module.params['key_alias'] and not module.params['key_arn']:
module.params['key_arn'] = get_arn_from_kms_alias(kms, module.params['key_alias'])
@@ -282,9 +285,9 @@ def main():
module.fail_json(msg='{} is an unknown grant type.'.format(g))
ret = do_grant(kms, module.params['key_arn'], module.params['role_arn'], module.params['grant_types'],
- mode=mode,
- dry_run=module.check_mode,
- clean_invalid_entries=module.params['clean_invalid_entries'])
+ mode=mode,
+ dry_run=module.check_mode,
+ clean_invalid_entries=module.params['clean_invalid_entries'])
result.update(ret)
except Exception as err:
diff --git a/lib/ansible/modules/cloud/amazon/cloudformation.py b/lib/ansible/modules/cloud/amazon/cloudformation.py
index 87f6281688..920a4d7aaa 100644
--- a/lib/ansible/modules/cloud/amazon/cloudformation.py
+++ b/lib/ansible/modules/cloud/amazon/cloudformation.py
@@ -263,7 +263,7 @@ from ansible.module_utils._text import to_bytes, to_native
def get_stack_events(cfn, stack_name, token_filter=None):
'''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
- ret = {'events':[], 'log':[]}
+ ret = {'events': [], 'log': []}
try:
pg = cfn.get_paginator(
@@ -348,8 +348,8 @@ def create_changeset(module, stack_params, cfn):
cs = cfn.create_change_set(**stack_params)
result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET')
result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
- 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
- 'NOTE that dependencies on this stack might fail due to pending changes!']
+ 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
+ 'NOTE that dependencies on this stack might fail due to pending changes!']
except Exception as err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
@@ -413,7 +413,7 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
except:
# If the stack previously existed, and now can't be found then it's
# been deleted successfully.
- if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name, op_token)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
@@ -421,12 +421,12 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
ret = get_stack_events(cfn, stack_name, op_token)
if not stack:
- if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name, op_token)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
else:
- ret.update({'changed': False, 'failed': True, 'output' : 'Stack not found.'})
+ ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
return ret
# it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
# Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
@@ -435,7 +435,7 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
return ret
# note the ordering of ROLLBACK_COMPLETE and COMPLETE, because otherwise COMPLETE will match both cases.
elif stack['StackStatus'].endswith('_COMPLETE'):
- ret.update({'changed': True, 'output' : 'Stack %s complete' % operation })
+ ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
return ret
elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
@@ -447,7 +447,7 @@ def stack_operation(cfn, stack_name, operation, op_token=None):
else:
# this can loop forever :/
time.sleep(5)
- return {'failed': True, 'output':'Failed for unknown reasons.'}
+ return {'failed': True, 'output': 'Failed for unknown reasons.'}
def build_changeset_name(stack_params):
@@ -470,7 +470,7 @@ def check_mode_changeset(module, stack_params, cfn):
try:
change_set = cfn.create_change_set(**stack_params)
- for i in range(60): # total time 5 min
+ for i in range(60): # total time 5 min
description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
break
@@ -496,7 +496,7 @@ def get_stack_facts(cfn, stack_name):
try:
stack_response = cfn.describe_stacks(StackName=stack_name)
stack_info = stack_response['Stacks'][0]
- except (botocore.exceptions.ValidationError,botocore.exceptions.ClientError) as err:
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
if 'does not exist' in error_msg:
# missing stack, don't bail.
@@ -567,7 +567,7 @@ def main():
stack_params['StackPolicyBody'] = open(module.params['stack_policy'], 'r').read()
template_parameters = module.params['template_parameters']
- stack_params['Parameters'] = [{'ParameterKey':k, 'ParameterValue':str(v)} for k, v in template_parameters.items()]
+ stack_params['Parameters'] = [{'ParameterKey': k, 'ParameterValue': str(v)} for k, v in template_parameters.items()]
if isinstance(module.params.get('tags'), dict):
stack_params['Tags'] = ansible.module_utils.ec2.ansible_dict_to_boto3_tag_list(module.params['tags'])
@@ -637,7 +637,7 @@ def main():
"resource_type": res['ResourceType'],
"last_updated_time": res['LastUpdatedTimestamp'],
"status": res['ResourceStatus'],
- "status_reason": res.get('ResourceStatusReason') # can be blank, apparently
+ "status_reason": res.get('ResourceStatusReason') # can be blank, apparently
})
result['stack_resources'] = stack_resources
@@ -658,8 +658,8 @@ def main():
if module.params['template_format'] is not None:
result['warnings'] = [('Argument `template_format` is deprecated '
- 'since Ansible 2.3, JSON and YAML templates are now passed '
- 'directly to the CloudFormation API.')]
+ 'since Ansible 2.3, JSON and YAML templates are now passed '
+ 'directly to the CloudFormation API.')]
module.exit_json(**result)
diff --git a/lib/ansible/modules/cloud/amazon/cloudfront_facts.py b/lib/ansible/modules/cloud/amazon/cloudfront_facts.py
index 3bc68f719d..9b0f97c732 100644
--- a/lib/ansible/modules/cloud/amazon/cloudfront_facts.py
+++ b/lib/ansible/modules/cloud/amazon/cloudfront_facts.py
@@ -259,7 +259,7 @@ class CloudFrontServiceManager:
def get_distribution(self, distribution_id):
try:
- func = partial(self.client.get_distribution,Id=distribution_id)
+ func = partial(self.client.get_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution - " + str(e),
@@ -268,7 +268,7 @@ class CloudFrontServiceManager:
def get_distribution_config(self, distribution_id):
try:
- func = partial(self.client.get_distribution_config,Id=distribution_id)
+ func = partial(self.client.get_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing distribution configuration - " + str(e),
@@ -277,7 +277,7 @@ class CloudFrontServiceManager:
def get_origin_access_identity(self, origin_access_identity_id):
try:
- func = partial(self.client.get_cloud_front_origin_access_identity,Id=origin_access_identity_id)
+ func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity - " + str(e),
@@ -286,7 +286,7 @@ class CloudFrontServiceManager:
def get_origin_access_identity_config(self, origin_access_identity_id):
try:
- func = partial(self.client.get_cloud_front_origin_access_identity_config,Id=origin_access_identity_id)
+ func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e),
@@ -295,7 +295,7 @@ class CloudFrontServiceManager:
def get_invalidation(self, distribution_id, invalidation_id):
try:
- func = partial(self.client.get_invalidation,DistributionId=distribution_id,Id=invalidation_id)
+ func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing invalidation - " + str(e),
@@ -304,7 +304,7 @@ class CloudFrontServiceManager:
def get_streaming_distribution(self, distribution_id):
try:
- func = partial(self.client.get_streaming_distribution,Id=distribution_id)
+ func = partial(self.client.get_streaming_distribution, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
@@ -313,7 +313,7 @@ class CloudFrontServiceManager:
def get_streaming_distribution_config(self, distribution_id):
try:
- func = partial(self.client.get_streaming_distribution_config,Id=distribution_id)
+ func = partial(self.client.get_streaming_distribution_config, Id=distribution_id)
return self.paginated_response(func)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
@@ -399,13 +399,13 @@ class CloudFrontServiceManager:
def summary_get_origin_access_identity_list(self):
try:
- origin_access_identity_list = { 'origin_access_identities': [] }
+ origin_access_identity_list = {'origin_access_identities': []}
origin_access_identities = self.list_origin_access_identities()
for origin_access_identity in origin_access_identities:
oai_id = origin_access_identity['Id']
oai_full_response = self.get_origin_access_identity(oai_id)
- oai_summary = { 'Id': oai_id, 'ETag': oai_full_response['ETag'] }
- origin_access_identity_list['origin_access_identities'].append( oai_summary )
+ oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
+ origin_access_identity_list['origin_access_identities'].append(oai_summary)
return origin_access_identity_list
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e),
@@ -415,8 +415,8 @@ class CloudFrontServiceManager:
def summary_get_distribution_list(self, streaming=False):
try:
list_name = 'streaming_distributions' if streaming else 'distributions'
- key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled' ]
- distribution_list = { list_name: [] }
+ key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
+ distribution_list = {list_name: []}
distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
for dist in distributions:
temp_distribution = {}
@@ -520,16 +520,18 @@ class CloudFrontServiceManager:
if 'Items' in item['Aliases']:
aliases = item['Aliases']['Items']
for alias in aliases:
- keyed_list.update( { alias: item } )
- keyed_list.update( { distribution_id: item } )
+ keyed_list.update({alias: item})
+ keyed_list.update({distribution_id: item})
return keyed_list
+
def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
facts[distribution_id].update(details)
for alias in aliases:
facts[alias].update(details)
return facts
+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
@@ -581,17 +583,17 @@ def main():
summary = module.params.get('summary')
aliases = []
- result = { 'cloudfront': {} }
+ result = {'cloudfront': {}}
facts = {}
require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
- streaming_distribution_config or list_invalidations)
+ streaming_distribution_config or list_invalidations)
# set default to summary if no option specified
summary = summary or not (distribution or distribution_config or origin_access_identity or
- origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
- list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
- list_streaming_distributions or list_distributions)
+ origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
+ list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
+ list_streaming_distributions or list_distributions)
# validations
if require_distribution_id and distribution_id is None and domain_name_alias is None:
@@ -611,21 +613,21 @@ def main():
# set appropriate cloudfront id
if distribution_id and not list_invalidations:
- facts = { distribution_id: {} }
+ facts = {distribution_id: {}}
aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
for alias in aliases:
- facts.update( { alias: {} } )
+ facts.update({alias: {}})
if invalidation_id:
- facts.update( { invalidation_id: {} } )
+ facts.update({invalidation_id: {}})
elif distribution_id and list_invalidations:
- facts = { distribution_id: {} }
+ facts = {distribution_id: {}}
aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
for alias in aliases:
- facts.update( { alias: {} } )
+ facts.update({alias: {}})
elif origin_access_identity_id:
- facts = { origin_access_identity_id: {} }
+ facts = {origin_access_identity_id: {}}
elif web_acl_id:
- facts = { web_acl_id: {} }
+ facts = {web_acl_id: {}}
# get details based on options
if distribution:
@@ -644,7 +646,7 @@ def main():
if streaming_distribution_config:
facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
if list_invalidations:
- facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id) }
+ facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
if 'facts_to_set' in vars():
facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)
diff --git a/lib/ansible/modules/cloud/amazon/data_pipeline.py b/lib/ansible/modules/cloud/amazon/data_pipeline.py
index f17e092548..168623cf41 100644
--- a/lib/ansible/modules/cloud/amazon/data_pipeline.py
+++ b/lib/ansible/modules/cloud/amazon/data_pipeline.py
@@ -332,7 +332,7 @@ def activate_pipeline(client, module):
pass
else:
module.fail_json(msg=('Data Pipeline {0} failed to activate '
- 'within timeout {1} seconds').format(dp_name, timeout))
+ 'within timeout {1} seconds').format(dp_name, timeout))
changed = True
data_pipeline = get_result(client, dp_id)
@@ -477,7 +477,7 @@ def diff_pipeline(client, module, objects, unique_id, dp_name):
result = {'data_pipeline': data_pipeline,
'msg': msg}
except DataPipelineNotFound:
- create_dp = True
+ create_dp = True
return create_dp, changed, result
diff --git a/lib/ansible/modules/cloud/amazon/dynamodb_table.py b/lib/ansible/modules/cloud/amazon/dynamodb_table.py
index 58386ff047..838af01878 100644
--- a/lib/ansible/modules/cloud/amazon/dynamodb_table.py
+++ b/lib/ansible/modules/cloud/amazon/dynamodb_table.py
@@ -223,7 +223,6 @@ def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3
try:
table = Table(table_name, connection=connection)
-
if dynamo_table_exists(table):
result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
else:
@@ -349,7 +348,7 @@ def has_throughput_changed(table, new_throughput):
return False
return new_throughput['read'] != table.throughput['read'] or \
- new_throughput['write'] != table.throughput['write']
+ new_throughput['write'] != table.throughput['write']
def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
@@ -397,6 +396,7 @@ def validate_index(index, module):
if index['type'] not in INDEX_TYPE_OPTIONS:
module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
+
def get_indexes(all_indexes):
indexes = []
global_indexes = []
@@ -429,7 +429,6 @@ def get_indexes(all_indexes):
return indexes, global_indexes
-
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
@@ -442,8 +441,8 @@ def main():
read_capacity=dict(default=1, type='int'),
write_capacity=dict(default=1, type='int'),
indexes=dict(default=[], type='list'),
- tags = dict(type='dict'),
- wait_for_active_timeout = dict(default=60, type='int'),
+ tags=dict(type='dict'),
+ wait_for_active_timeout=dict(default=60, type='int'),
))
module = AnsibleModule(
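A minimal standalone sketch of the throughput comparison cleaned up in the has_throughput_changed hunk above; the plain dictionaries stand in for the boto Table throughput the module actually reads, so the names here are illustrative only.

def has_throughput_changed(current, new):
    # Mirrors the module's check: a change in either read or write capacity counts.
    if not new:
        return False
    return (new['read'] != current['read'] or
            new['write'] != current['write'])

# Raising write capacity from 1 to 5 is reported as a change.
print(has_throughput_changed({'read': 1, 'write': 1}, {'read': 1, 'write': 5}))  # True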
diff --git a/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py b/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
index 91e8b98e72..e753b369b9 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
@@ -158,7 +158,6 @@ except ImportError:
HAS_BOTO3 = False
-
def copy_image(module, ec2):
"""
Copies an AMI
@@ -185,8 +184,8 @@ def copy_image(module, ec2):
if module.params.get('tags'):
ec2.create_tags(
Resources=[image_id],
- Tags=[{'Key' : k, 'Value': v} for k,v in module.params.get('tags').items()]
- )
+ Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()]
+ )
module.exit_json(changed=True, image_id=image_id)
except WaiterError as we:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb.py b/lib/ansible/modules/cloud/amazon/ec2_elb.py
index 56e8adcdd6..7133c1f4bf 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_elb.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_elb.py
@@ -171,7 +171,7 @@ class ElbManager:
found = False
for lb in self.lbs:
if lb.name == lbtest:
- found=True
+ found = True
break
return found
@@ -330,7 +330,7 @@ def main():
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
- ec2_elbs={'default': None, 'required': False, 'type':'list'},
+ ec2_elbs={'default': None, 'required': False, 'type': 'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
wait_timeout={'required': False, 'default': 0, 'type': 'int'}
@@ -363,7 +363,7 @@ def main():
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
- msg="ELB %s does not exist" % elb
+ msg = "ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py b/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
index c983a602f5..116a9898bf 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
@@ -425,6 +425,7 @@ def _throttleable_operation(max_retries):
return _do_op
return _operation_wrapper
+
def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
@@ -434,6 +435,7 @@ def _get_vpc_connection(module, region, aws_connect_params):
_THROTTLING_RETRIES = 5
+
class ElbManager(object):
"""Handles ELB creation and destruction"""
@@ -579,10 +581,10 @@ class ElbManager(object):
# status of instances behind the ELB
if info['instances']:
- info['instance_health'] = [ dict(
- instance_id = instance_state.instance_id,
- reason_code = instance_state.reason_code,
- state = instance_state.state
+ info['instance_health'] = [dict(
+ instance_id=instance_state.instance_id,
+ reason_code=instance_state.reason_code,
+ state=instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
@@ -663,7 +665,7 @@ class ElbManager(object):
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
- 'description': 'ELB {0}'.format(self.name) })
+ 'description': 'ELB {0}'.format(self.name)})
for x in range(0, max_retries):
for interface in elb_interfaces:
@@ -888,13 +890,13 @@ class ElbManager(object):
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
- set(self.zones))
+ set(self.zones))
zones_to_enable = list(set(self.zones) -
- set(self.elb.availability_zones))
+ set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
- set(self.elb.availability_zones))
+ set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
@@ -962,7 +964,7 @@ class ElbManager(object):
"enabled": True,
"s3_bucket_name": self.access_logs['s3_location'],
"s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
- "emit_interval": self.access_logs.get('interval', 60),
+ "emit_interval": self.access_logs.get('interval', 60),
}
update_access_logs_config = False
@@ -1002,10 +1004,10 @@ class ElbManager(object):
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
- return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
+ return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
- getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
+ getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
@@ -1223,7 +1225,7 @@ class ElbManager(object):
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
- self.changed=True
+ self.changed = True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
@@ -1232,7 +1234,7 @@ class ElbManager(object):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
- self.changed=True
+ self.changed = True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
@@ -1275,7 +1277,7 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
- mutually_exclusive = [['security_group_ids', 'security_group_names']]
+ mutually_exclusive=[['security_group_ids', 'security_group_names']]
)
if not HAS_BOTO:
@@ -1321,7 +1323,7 @@ def main():
security_group_ids = []
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
- if subnets: # We have at least one subnet, ergo this is a VPC
+ if subnets: # We have at least one subnet, ergo this is a VPC
vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
filters = {'vpc_id': vpc_id}
@@ -1333,10 +1335,10 @@ def main():
if isinstance(group_name, string_types):
group_name = [group_name]
- group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
+ group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg = str(e))
+ module.fail_json(msg=str(e))
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
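A small sketch, using plain Python sets, of the zones_to_enable / zones_to_disable arithmetic whose continuation lines are re-indented in the ElbManager hunk above; the zone names are made-up examples rather than values from the module.

current_zones = {'us-east-1a', 'us-east-1b'}   # stands in for self.elb.availability_zones
wanted_zones = {'us-east-1b', 'us-east-1c'}    # stands in for self.zones
purge_zones = True

if purge_zones:
    zones_to_disable = list(current_zones - wanted_zones)  # ['us-east-1a']
    zones_to_enable = list(wanted_zones - current_zones)   # ['us-east-1c']
else:
    zones_to_disable = None
    zones_to_enable = list(wanted_zones - current_zones)

print(zones_to_enable, zones_to_disable)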
diff --git a/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py b/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
index e3d86e2a3d..adf80cfac9 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
@@ -68,9 +68,9 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError,
- ansible_dict_to_boto3_filter_list, boto3_conn,
- boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
- connect_to_aws, ec2_argument_spec, get_aws_connection_info)
+ ansible_dict_to_boto3_filter_list, boto3_conn,
+ boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
+ connect_to_aws, ec2_argument_spec, get_aws_connection_info)
def list_ec2_eni_boto3(connection, module):
@@ -99,7 +99,7 @@ def get_eni_info(interface):
# Private addresses
private_addresses = []
for ip in interface.private_ip_addresses:
- private_addresses.append({ 'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary })
+ private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
@@ -152,7 +152,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- filters = dict(default=None, type='dict')
+ filters=dict(default=None, type='dict')
)
)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_lc.py b/lib/ansible/modules/cloud/amazon/ec2_lc.py
index 74787d1a74..67875c4434 100755
--- a/lib/ansible/modules/cloud/amazon/ec2_lc.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_lc.py
@@ -339,7 +339,7 @@ def create_launch_config(connection, module):
module.fail_json(msg="Failed to create launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
result = (dict((k, v) for k, v in launch_config.items()
- if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings']))
+ if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings']))
result['CreatedTime'] = to_text(launch_config.get('CreatedTime'))
diff --git a/lib/ansible/modules/cloud/amazon/ec2_lc_facts.py b/lib/ansible/modules/cloud/amazon/ec2_lc_facts.py
index 50ad935fe8..bff65108e2 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_lc_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_lc_facts.py
@@ -191,7 +191,7 @@ def list_launch_configs(connection, module):
launch_config['CreatedTime'] = str(launch_config['CreatedTime'])
if sort:
- snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+ snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
try:
if sort and sort_start and sort_end:
@@ -210,13 +210,13 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- name = dict(required=False, default=[], type='list'),
- sort = dict(required=False, default=None,
- choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
- sort_order = dict(required=False, default='ascending',
- choices=['ascending', 'descending']),
- sort_start = dict(required=False),
- sort_end = dict(required=False),
+ name=dict(required=False, default=[], type='list'),
+ sort=dict(required=False, default=None,
+ choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
+ sort_order=dict(required=False, default='ascending',
+ choices=['ascending', 'descending']),
+ sort_start=dict(required=False),
+ sort_end=dict(required=False),
)
)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py b/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
index 6c8cd247ef..cbccfe662c 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
@@ -199,13 +199,13 @@ def create_metric_alarm(connection, module):
alarm = alarms[0]
changed = False
- for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
+ for attr in ('comparison', 'metric', 'namespace', 'statistic', 'threshold', 'period', 'evaluation_periods', 'unit', 'description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
- #this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
+ # this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
- comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
+ comparisons = {'<=': 'LessThanOrEqualToThreshold', '<': 'LessThanThreshold', '>=': 'GreaterThanOrEqualToThreshold', '>': 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions')
@@ -215,10 +215,10 @@ def create_metric_alarm(connection, module):
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
- changed=True
+ changed = True
setattr(alarm, 'dimensions', dim1)
- for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
+ for attr in ('alarm_actions', 'insufficient_data_actions', 'ok_actions'):
action = module.params.get(attr) or []
# Boto and/or ansible may provide same elements in lists but in different order.
# Compare on sets since they do not need any order.
@@ -233,24 +233,25 @@ def create_metric_alarm(connection, module):
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
- actions_enabled=result.actions_enabled,
- alarm_actions=result.alarm_actions,
- alarm_arn=result.alarm_arn,
- comparison=result.comparison,
- description=result.description,
- dimensions=result.dimensions,
- evaluation_periods=result.evaluation_periods,
- insufficient_data_actions=result.insufficient_data_actions,
- last_updated=result.last_updated,
- metric=result.metric,
- namespace=result.namespace,
- ok_actions=result.ok_actions,
- period=result.period,
- state_reason=result.state_reason,
- state_value=result.state_value,
- statistic=result.statistic,
- threshold=result.threshold,
- unit=result.unit)
+ actions_enabled=result.actions_enabled,
+ alarm_actions=result.alarm_actions,
+ alarm_arn=result.alarm_arn,
+ comparison=result.comparison,
+ description=result.description,
+ dimensions=result.dimensions,
+ evaluation_periods=result.evaluation_periods,
+ insufficient_data_actions=result.insufficient_data_actions,
+ last_updated=result.last_updated,
+ metric=result.metric,
+ namespace=result.namespace,
+ ok_actions=result.ok_actions,
+ period=result.period,
+ state_reason=result.state_reason,
+ state_value=result.state_value,
+ statistic=result.statistic,
+ threshold=result.threshold,
+ unit=result.unit)
+
def delete_metric_alarm(connection, module):
name = module.params.get('name')
@@ -289,7 +290,7 @@ def main():
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
- )
+ )
)
module = AnsibleModule(argument_spec=argument_spec)
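A tiny sketch of the comparator translation reformatted above: the module maps its shorthand operators onto the CloudWatch comparison names before re-saving an alarm. Only the dictionary comes from the hunk; the helper wrapper is an illustrative addition.

comparisons = {'<=': 'LessThanOrEqualToThreshold',
               '<': 'LessThanThreshold',
               '>=': 'GreaterThanOrEqualToThreshold',
               '>': 'GreaterThanThreshold'}

def to_cloudwatch_comparison(symbol):
    # In this sketch, unknown or already-expanded values pass through unchanged.
    return comparisons.get(symbol, symbol)

print(to_cloudwatch_comparison('>='))  # GreaterThanOrEqualToThreshold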
diff --git a/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py b/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
index e4005145f1..708e111be6 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
@@ -88,7 +88,7 @@ def create_scaling_policy(connection, module):
min_adjustment_step = module.params.get('min_adjustment_step')
cooldown = module.params.get('cooldown')
- scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
+ scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
if not scalingPolicies:
sp = ScalingPolicy(
@@ -101,7 +101,7 @@ def create_scaling_policy(connection, module):
try:
connection.create_scaling_policy(sp)
- policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
+ policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
@@ -121,7 +121,7 @@ def create_scaling_policy(connection, module):
setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
# check the remaining attributes
- for attr in ('adjustment_type','scaling_adjustment','cooldown'):
+ for attr in ('adjustment_type', 'scaling_adjustment', 'cooldown'):
if getattr(policy, attr) != module.params.get(attr):
changed = True
setattr(policy, attr, module.params.get(attr))
@@ -129,7 +129,7 @@ def create_scaling_policy(connection, module):
try:
if changed:
connection.create_scaling_policy(policy)
- policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
+ policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
@@ -140,7 +140,7 @@ def delete_scaling_policy(connection, module):
sp_name = module.params.get('name')
asg_name = module.params.get('asg_name')
- scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
+ scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
if scalingPolicies:
try:
@@ -156,12 +156,12 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- name = dict(required=True, type='str'),
- adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
- asg_name = dict(required=True, type='str'),
- scaling_adjustment = dict(type='int'),
- min_adjustment_step = dict(type='int'),
- cooldown = dict(type='int'),
+ name=dict(required=True, type='str'),
+ adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
+ asg_name=dict(required=True, type='str'),
+ scaling_adjustment=dict(type='int'),
+ min_adjustment_step=dict(type='int'),
+ cooldown=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
)
)
@@ -178,7 +178,7 @@ def main():
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- module.fail_json(msg = str(e))
+ module.fail_json(msg=str(e))
if state == 'present':
create_scaling_policy(connection, module)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_snapshot.py b/lib/ansible/modules/cloud/amazon/ec2_snapshot.py
index eca2d3aeef..fed3ebe87e 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_snapshot.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_snapshot.py
@@ -184,7 +184,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
changed = False
required = [volume_id, snapshot_id, instance_id]
- if required.count(None) != len(required) - 1: # only 1 must be set
+ if required.count(None) != len(required) - 1: # only 1 must be set
module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
if instance_id and not device_name or device_name and not instance_id:
module.fail_json(msg='Instance ID and device name must both be specified')
@@ -193,7 +193,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
except boto.exception.BotoServerError as e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
@@ -202,7 +202,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
if state == 'absent':
if not snapshot_id:
- module.fail_json(msg = 'snapshot_id must be set when state is absent')
+ module.fail_json(msg='snapshot_id must be set when state is absent')
try:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError as e:
@@ -210,7 +210,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# successful delete
module.exit_json(changed=True)
@@ -221,7 +221,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None,
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
- last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
+ last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
snapshot = _get_most_recent_snapshot(current_snapshots,
max_snapshot_age_secs=last_snapshot_min_age)
try:
@@ -249,16 +249,16 @@ def create_snapshot_ansible_module():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- volume_id = dict(),
- description = dict(),
- instance_id = dict(),
- snapshot_id = dict(),
- device_name = dict(),
- wait = dict(type='bool', default=True),
- wait_timeout = dict(type='int', default=0),
- last_snapshot_min_age = dict(type='int', default=0),
- snapshot_tags = dict(type='dict', default=dict()),
- state = dict(choices=['absent', 'present'], default='present'),
+ volume_id=dict(),
+ description=dict(),
+ instance_id=dict(),
+ snapshot_id=dict(),
+ device_name=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=0),
+ last_snapshot_min_age=dict(type='int', default=0),
+ snapshot_tags=dict(type='dict', default=dict()),
+ state=dict(choices=['absent', 'present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
index 96a70e559e..7972885747 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
@@ -179,8 +179,8 @@ except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
- boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
- ec2_argument_spec, get_aws_connection_info)
+ boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
+ ec2_argument_spec, get_aws_connection_info)
def list_ec2_snapshots(connection, module):
diff --git a/lib/ansible/modules/cloud/amazon/ec2_tag.py b/lib/ansible/modules/cloud/amazon/ec2_tag.py
index 0a3992fc73..beb32eeda2 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_tag.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_tag.py
@@ -126,9 +126,9 @@ from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- resource = dict(required=True),
- tags = dict(type='dict'),
- state = dict(default='present', choices=['present', 'absent', 'list']),
+ resource=dict(required=True),
+ tags=dict(type='dict'),
+ state=dict(default='present', choices=['present', 'absent', 'list']),
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
@@ -144,7 +144,7 @@ def main():
# We need a comparison here so that we can accurately report back changed status.
# Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
- filters = {'resource-id' : resource}
+ filters = {'resource-id': resource}
gettags = ec2.get_all_tags(filters=filters)
dictadd = {}
@@ -158,14 +158,14 @@ def main():
if not tags:
module.fail_json(msg="tags argument is required when state is present")
if set(tags.items()).issubset(set(tagdict.items())):
- module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
+ module.exit_json(msg="Tags already exists in %s." % resource, changed=False)
else:
for (key, value) in set(tags.items()):
if (key, value) not in set(tagdict.items()):
dictadd[key] = value
if not module.check_mode:
ec2.create_tags(resource, dictadd)
- module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
+ module.exit_json(msg="Tags %s created for resource %s." % (dictadd, resource), changed=True)
if state == 'absent':
if not tags:
@@ -180,7 +180,7 @@ def main():
dictremove[key] = value
if not module.check_mode:
ec2.delete_tags(resource, dictremove)
- module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)
+ module.exit_json(msg="Tags %s removed for resource %s." % (dictremove, resource), changed=True)
if state == 'list':
module.exit_json(changed=False, tags=tagdict)
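A self-contained sketch of the tag reconciliation shown in the hunks above: pairs requested but not already present are added, and requested keys that exist are removed when state is absent. The sample dictionaries are illustrative and the logic is simplified relative to the module.

existing = {'Name': 'web01', 'env': 'dev'}     # stands in for the tags returned by get_all_tags
requested = {'env': 'prod', 'owner': 'ops'}    # stands in for the module's tags parameter

# state=present: add or update any requested pair that is not already set.
dictadd = {k: v for k, v in requested.items() if (k, v) not in existing.items()}

# state=absent: remove requested keys that currently exist.
dictremove = {k: existing[k] for k in requested if k in existing}

print(dictadd)     # {'env': 'prod', 'owner': 'ops'}
print(dictremove)  # {'env': 'dev'}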
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vol.py b/lib/ansible/modules/cloud/amazon/ec2_vol.py
index 054fcdade8..d0ab999b69 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vol.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vol.py
@@ -280,7 +280,7 @@ def get_volume(module, ec2):
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError as e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
@@ -306,7 +306,7 @@ def get_volumes(module, ec2):
else:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError as e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return vols
@@ -330,6 +330,7 @@ def boto_supports_volume_encryption():
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
+
def boto_supports_kms_key_id():
"""
Check if Boto library supports kms_key_ids (added in 2.39.0)
@@ -339,6 +340,7 @@ def boto_supports_kms_key_id():
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
+
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
@@ -375,7 +377,7 @@ def create_volume(module, ec2, zone):
if tags:
ec2.create_tags([volume.id], tags)
except boto.exception.BotoServerError as e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return volume, changed
@@ -400,12 +402,12 @@ def attach_volume(module, ec2, volume, instance):
else:
device_name = '/dev/xvdf'
except boto.exception.BotoServerError as e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
- module.fail_json(msg = "Volume %s is already attached to another instance: %s"
+ module.fail_json(msg="Volume %s is already attached to another instance: %s"
% (volume.id, adata.instance_id))
else:
# Volume is already attached to right instance
@@ -418,7 +420,7 @@ def attach_volume(module, ec2, volume, instance):
volume.update()
changed = True
except boto.exception.BotoServerError as e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
modify_dot_attribute(module, ec2, instance, device_name)
@@ -435,7 +437,7 @@ def modify_dot_attribute(module, ec2, instance, device_name):
instance.update()
dot = instance.block_device_mapping[device_name].delete_on_termination
except boto.exception.BotoServerError as e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if delete_on_termination != dot:
try:
@@ -450,7 +452,7 @@ def modify_dot_attribute(module, ec2, instance, device_name):
instance.update()
changed = True
except boto.exception.BotoServerError as e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return changed
@@ -506,20 +508,20 @@ def get_volume_info(volume, state):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- instance = dict(),
- id = dict(),
- name = dict(),
- volume_size = dict(),
- volume_type = dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
- iops = dict(),
- encrypted = dict(type='bool', default=False),
- kms_key_id = dict(),
- device_name = dict(),
- delete_on_termination = dict(type='bool', default=False),
- zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
- snapshot = dict(),
- state = dict(choices=['absent', 'present', 'list'], default='present'),
- tags = dict(type='dict', default={})
+ instance=dict(),
+ id=dict(),
+ name=dict(),
+ volume_size=dict(),
+ volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
+ iops=dict(),
+ encrypted=dict(type='bool', default=False),
+ kms_key_id=dict(),
+ device_name=dict(),
+ delete_on_termination=dict(type='bool', default=False),
+ zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
+ snapshot=dict(),
+ state=dict(choices=['absent', 'present', 'list'], default='present'),
+ tags=dict(type='dict', default={})
)
)
module = AnsibleModule(argument_spec=argument_spec)
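A short sketch of the boto version gate used by boto_supports_volume_encryption and boto_supports_kms_key_id in the file above; the hard-coded version strings stand in for boto.Version, and LooseVersion is the same distutils-era comparison the module relies on.

from distutils.version import LooseVersion

def supports_feature(library_version, minimum='2.29.0'):
    # Same comparison the module performs against boto.Version.
    return LooseVersion(library_version) >= LooseVersion(minimum)

print(supports_feature('2.39.0'))            # True  (volume encryption)
print(supports_feature('2.25.0'))            # False
print(supports_feature('2.39.0', '2.39.0'))  # True  (kms_key_id threshold)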
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
index 9d95b75b72..275602890a 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
@@ -92,12 +92,13 @@ def get_volume_info(volume):
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
- },
+ },
'tags': volume.tags
- }
+ }
return volume_info
+
def list_ec2_volumes(connection, module):
filters = module.params.get("filters")
@@ -118,7 +119,7 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- filters = dict(default=None, type='dict')
+ filters=dict(default=None, type='dict')
)
)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py
index 4c36fb4ff0..d33e9cb5dd 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py
@@ -235,6 +235,7 @@ def ensure_tags(module, vpc_conn, resource_id, tags, add_only, check_mode):
except EC2ResponseError as e:
module.fail_json(msg="Failed to modify tags: %s" % e.message, exception=traceback.format_exc())
+
def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
"""
Returns the DHCP options object currently associated with the requested VPC ID using the VPC
@@ -284,7 +285,7 @@ def main():
inherit_existing=dict(type='bool', default=False),
tags=dict(type='dict', default=None, aliases=['resource_tags']),
state=dict(type='str', default='present', choices=['present', 'absent'])
- )
+ )
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
@@ -312,17 +313,17 @@ def main():
new_options['ntp-servers'] = params['ntp_servers']
if params['domain_name'] is not None:
# needs to be a list for comparison with boto objects later
- new_options['domain-name'] = [ params['domain_name'] ]
+ new_options['domain-name'] = [params['domain_name']]
if params['netbios_node_type'] is not None:
# needs to be a list for comparison with boto objects later
- new_options['netbios-node-type'] = [ str(params['netbios_node_type']) ]
+ new_options['netbios-node-type'] = [str(params['netbios_node_type'])]
# If we were given a vpc_id then we need to look at the options on that
if params['vpc_id']:
existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
# if we've been asked to inherit existing options, do that now
if params['inherit_existing']:
if existing_options:
- for option in [ 'domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
+ for option in ['domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
new_options[option] = existing_options.options.get(option)
@@ -336,7 +337,7 @@ def main():
# Now let's cover the case where there are existing options that we were told about by id
# If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
else:
- supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id':params['dhcp_options_id']})
+ supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id': params['dhcp_options_id']})
if len(supplied_options) != 1:
if params['state'] != 'absent':
module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
index 0f26aac7f9..fceff34b65 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
@@ -147,7 +147,7 @@ from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_conn
DEFAULT_RULE_FIELDS = {
'RuleNumber': 32767,
'RuleAction': 'deny',
- 'CidrBlock': '0.0.0.0/0',
+ 'CidrBlock': '0.0.0.0/0',
'Protocol': '-1'
}
@@ -159,7 +159,7 @@ DEFAULT_EGRESS = dict(list(DEFAULT_RULE_FIELDS.items()) + [('Egress', True)])
PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
-#Utility methods
+# Utility methods
def icmp_present(entry):
if len(entry) == 6 and entry[1] == 'icmp' or entry[1] == 1:
return True
@@ -225,7 +225,7 @@ def nacls_changed(nacl, client, module):
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
nacl = describe_network_acl(client, module)
entries = nacl['NetworkAcls'][0]['Entries']
- tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS !=entry]
+ tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS != entry]
tmp_ingress = [entry for entry in entries if entry['Egress'] is False]
egress = [rule for rule in tmp_egress if DEFAULT_EGRESS != rule]
ingress = [rule for rule in tmp_ingress if DEFAULT_INGRESS != rule]
@@ -321,7 +321,7 @@ def construct_acl_entries(nacl, client, module):
create_network_acl_entry(params, client, module)
-## Module invocations
+# Module invocations
def setup_network_acl(client, module):
changed = False
nacl = describe_network_acl(client, module)
@@ -372,7 +372,7 @@ def remove_network_acl(client, module):
return changed, result
-#Boto3 client methods
+# Boto3 client methods
def create_network_acl(vpc_id, client, module):
try:
if module.check_mode:
@@ -546,7 +546,7 @@ def main():
ingress=dict(required=False, type='list', default=list()),
egress=dict(required=False, type='list', default=list(),),
state=dict(default='present', choices=['present', 'absent']),
- ),
+ ),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
index a1a54d676b..520424bfb4 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
@@ -154,9 +154,9 @@ def vpc_exists(module, vpc, name, cidr_block, multi):
matched_vpc = None
try:
- matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
+ matching_vpcs = vpc.get_all_vpcs(filters={'tag:Name': name, 'cidr-block': cidr_block})
except Exception as e:
- e_msg=boto_exception(e)
+ e_msg = boto_exception(e)
module.fail_json(msg=e_msg)
if multi:
@@ -186,7 +186,7 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name):
else:
return False
except Exception as e:
- e_msg=boto_exception(e)
+ e_msg = boto_exception(e)
module.fail_json(msg=e_msg)
@@ -199,6 +199,7 @@ def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
else:
return False
+
def get_vpc_values(vpc_obj):
if vpc_obj is not None:
@@ -213,18 +214,19 @@ def get_vpc_values(vpc_obj):
else:
return None
+
def main():
- argument_spec=ec2_argument_spec()
+ argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- name = dict(type='str', default=None, required=True),
- cidr_block = dict(type='str', default=None, required=True),
- tenancy = dict(choices=['default', 'dedicated'], default='default'),
- dns_support = dict(type='bool', default=True),
- dns_hostnames = dict(type='bool', default=True),
- dhcp_opts_id = dict(type='str', default=None, required=False),
- tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
- state = dict(choices=['present', 'absent'], default='present'),
- multi_ok = dict(type='bool', default=False)
+ name=dict(type='str', default=None, required=True),
+ cidr_block=dict(type='str', default=None, required=True),
+ tenancy=dict(choices=['default', 'dedicated'], default='default'),
+ dns_support=dict(type='bool', default=True),
+ dns_hostnames=dict(type='bool', default=True),
+ dhcp_opts_id=dict(type='str', default=None, required=False),
+ tags=dict(type='dict', required=False, default=None, aliases=['resource_tags']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ multi_ok=dict(type='bool', default=False)
)
)
@@ -236,17 +238,17 @@ def main():
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
- name=module.params.get('name')
- cidr_block=module.params.get('cidr_block')
- tenancy=module.params.get('tenancy')
- dns_support=module.params.get('dns_support')
- dns_hostnames=module.params.get('dns_hostnames')
- dhcp_id=module.params.get('dhcp_opts_id')
- tags=module.params.get('tags')
- state=module.params.get('state')
- multi=module.params.get('multi_ok')
+ name = module.params.get('name')
+ cidr_block = module.params.get('cidr_block')
+ tenancy = module.params.get('tenancy')
+ dns_support = module.params.get('dns_support')
+ dns_hostnames = module.params.get('dns_hostnames')
+ dhcp_id = module.params.get('dhcp_opts_id')
+ tags = module.params.get('tags')
+ state = module.params.get('state')
+ multi = module.params.get('multi_ok')
- changed=False
+ changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
@@ -298,7 +300,7 @@ def main():
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
except BotoServerError as e:
- e_msg=boto_exception(e)
+ e_msg = boto_exception(e)
module.fail_json(msg=e_msg)
if not module.check_mode:
@@ -306,7 +308,7 @@ def main():
try:
vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
except BotoServerError as e:
- e_msg=boto_exception(e)
+ e_msg = boto_exception(e)
module.fail_json(msg=e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
@@ -325,7 +327,7 @@ def main():
except BotoServerError as e:
e_msg = boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
- "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
+ "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
index 73d9dfc2ff..d0e106a2d7 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
@@ -373,7 +373,7 @@ def main():
client = boto3_conn(module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg="Can't authorize connection - "+str(e))
+ module.fail_json(msg="Can't authorize connection - " + str(e))
if state == 'present':
(changed, results) = create_peer_connection(client, module)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
index 700b692d88..53c954acc4 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
@@ -147,6 +147,7 @@ def get_vgw_info(vgws):
return vgw_info
+
def wait_for_status(client, module, vpn_gateway_id, status):
polling_increment_secs = 15
max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
@@ -227,7 +228,7 @@ def delete_vgw(client, module, vpn_gateway_id):
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- #return the deleted VpnGatewayId as this is not included in the above response
+ # return the deleted VpnGatewayId as this is not included in the above response
result = vpn_gateway_id
return result
@@ -236,7 +237,7 @@ def create_tags(client, module, vpn_gateway_id):
params = dict()
try:
- response = client.create_tags(Resources=[vpn_gateway_id],Tags=load_tags(module))
+ response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module))
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@@ -280,7 +281,7 @@ def find_tags(client, module, resource_id=None):
try:
response = client.describe_tags(Filters=[
{'Name': 'resource-id', 'Values': [resource_id]}
- ])
+ ])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@@ -295,7 +296,7 @@ def check_tags(client, module, existing_vgw, vpn_gateway_id):
changed = False
tags_list = {}
- #format tags for comparison
+ # format tags for comparison
for tags in existing_vgw[0]['Tags']:
if tags['Key'] != 'Name':
tags_list[tags['Key']] = tags['Value']
@@ -307,7 +308,7 @@ def check_tags(client, module, existing_vgw, vpn_gateway_id):
vgw = find_vgw(client, module)
changed = True
- #if no tag args are supplied, delete any existing tags with the exception of the name tag
+ # if no tag args are supplied, delete any existing tags with the exception of the name tag
if params['Tags'] is None and tags_list != {}:
tags_to_delete = []
for tags in existing_vgw[0]['Tags']:
@@ -346,7 +347,7 @@ def find_vgw(client, module, vpn_gateway_id=None):
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
- ])
+ ])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@@ -362,7 +363,7 @@ def find_vgw(client, module, vpn_gateway_id=None):
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
- ])
+ ])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@@ -502,7 +503,7 @@ def ensure_vgw_absent(client, module):
deleted_vgw = "Nothing to do"
else:
- #Check that a name and type argument has been supplied if no vgw-id
+ # Check that a name and type argument has been supplied if no vgw-id
if not module.params.get('name') or not module.params.get('type'):
module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is supplied')
@@ -518,7 +519,7 @@ def ensure_vgw_absent(client, module):
# detach the vpc from the vgw
detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
- #now that the vpc has been detached, delete the vgw
+ # now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
changed = True
@@ -528,7 +529,7 @@ def ensure_vgw_absent(client, module):
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
- #now that the vpc has been detached, delete the vgw
+ # now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
else:
@@ -555,7 +556,7 @@ def main():
wait_timeout=dict(type='int', default=320),
type=dict(default='ipsec.1', choices=['ipsec.1']),
tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
- )
+ )
)
module = AnsibleModule(argument_spec=argument_spec)
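A hedged sketch of the wait_for_status arithmetic visible above: the wait timeout is divided into fixed polling increments and the state is re-checked each pass. Only the two assignments come from the hunk; check_state and the loop body are assumptions standing in for the module's describe calls.

import time

def wait_for_status(check_state, wanted, wait_timeout=320, polling_increment_secs=15):
    max_retries = wait_timeout // polling_increment_secs
    for _ in range(max_retries):
        if check_state() == wanted:
            return True
        time.sleep(polling_increment_secs)
    return False

# Example with an in-memory state that becomes 'attached' on the third poll.
states = iter(['detached', 'detached', 'attached'])
print(wait_for_status(lambda: next(states), 'attached', wait_timeout=3, polling_increment_secs=1))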
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py
index 208dc04c69..a6cf9313eb 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py
@@ -107,10 +107,10 @@ from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info
def get_virtual_gateway_info(virtual_gateway):
virtual_gateway_info = {'VpnGatewayId': virtual_gateway['VpnGatewayId'],
- 'State': virtual_gateway['State'],
- 'Type': virtual_gateway['Type'],
- 'VpcAttachments': virtual_gateway['VpcAttachments'],
- 'Tags': virtual_gateway['Tags']}
+ 'State': virtual_gateway['State'],
+ 'Type': virtual_gateway['Type'],
+ 'VpcAttachments': virtual_gateway['VpcAttachments'],
+ 'Tags': virtual_gateway['Tags']}
return virtual_gateway_info
@@ -126,10 +126,10 @@ def list_virtual_gateways(client, module):
try:
all_virtual_gateways = client.describe_vpn_gateways(**params)
except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e),exception=traceback.format_exc())
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
snaked_vgws = [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw))
- for vgw in all_virtual_gateways['VpnGateways']]
+ for vgw in all_virtual_gateways['VpnGateways']]
module.exit_json(virtual_gateways=snaked_vgws)
@@ -138,8 +138,8 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- filters = dict(type='dict', default=dict()),
- vpn_gateway_ids = dict(type='list', default=None)
+ filters=dict(type='dict', default=dict()),
+ vpn_gateway_ids=dict(type='list', default=None)
)
)
@@ -153,7 +153,7 @@ def main():
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg="Can't authorize connection - "+str(e))
+ module.fail_json(msg="Can't authorize connection - " + str(e))
# call your function here
results = list_virtual_gateways(connection, module)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_win_password.py b/lib/ansible/modules/cloud/amazon/ec2_win_password.py
index ad7715defa..b28a59f89c 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_win_password.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_win_password.py
@@ -119,11 +119,11 @@ BACKEND = default_backend()
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- instance_id = dict(required=True),
- key_file = dict(required=True, type='path'),
- key_passphrase = dict(no_log=True, default=None, required=False),
- wait = dict(type='bool', default=False, required=False),
- wait_timeout = dict(default=120, required=False),
+ instance_id=dict(required=True),
+ key_file=dict(required=True, type='path'),
+ key_passphrase=dict(no_log=True, default=None, required=False),
+ wait=dict(type='bool', default=False, required=False),
+ wait_timeout=dict(default=120, required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@@ -158,18 +158,18 @@ def main():
decoded = b64decode(data)
if wait and datetime.datetime.now() >= end:
- module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout)
+ module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)
try:
f = open(key_file, 'rb')
except IOError as e:
- module.fail_json(msg = "I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
+ module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
else:
try:
with f:
key = load_pem_private_key(f.read(), b_key_passphrase, BACKEND)
except (ValueError, TypeError) as e:
- module.fail_json(msg = "unable to parse key file")
+ module.fail_json(msg="unable to parse key file")
try:
decrypted = key.decrypt(decoded, PKCS1v15())
diff --git a/lib/ansible/modules/cloud/amazon/ecs_cluster.py b/lib/ansible/modules/cloud/amazon/ecs_cluster.py
index 6cd1e58d8b..c50eedc57b 100644
--- a/lib/ansible/modules/cloud/amazon/ecs_cluster.py
+++ b/lib/ansible/modules/cloud/amazon/ecs_cluster.py
@@ -144,34 +144,35 @@ class EcsClusterManager:
response = self.ecs.describe_clusters(clusters=[
cluster_name
])
- if len(response['failures'])>0:
+ if len(response['failures']) > 0:
c = self.find_in_array(response['failures'], cluster_name, 'arn')
- if c and c['reason']=='MISSING':
+ if c and c['reason'] == 'MISSING':
return None
# fall thru and look through found ones
- if len(response['clusters'])>0:
+ if len(response['clusters']) > 0:
c = self.find_in_array(response['clusters'], cluster_name)
if c:
return c
raise Exception("Unknown problem describing cluster %s." % cluster_name)
- def create_cluster(self, clusterName = 'default'):
+ def create_cluster(self, clusterName='default'):
response = self.ecs.create_cluster(clusterName=clusterName)
return response['cluster']
def delete_cluster(self, clusterName):
return self.ecs.delete_cluster(cluster=clusterName)
+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- state=dict(required=True, choices=['present', 'absent', 'has_instances'] ),
- name=dict(required=True, type='str' ),
+ state=dict(required=True, choices=['present', 'absent', 'has_instances']),
+ name=dict(required=True, type='str'),
delay=dict(required=False, type='int', default=10),
repeat=dict(required=False, type='int', default=10)
))
- required_together = ( ['state', 'name'] )
+ required_together = (['state', 'name'])
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
@@ -185,12 +186,12 @@ def main():
try:
existing = cluster_mgr.describe_cluster(module.params['name'])
except Exception as e:
- module.fail_json(msg="Exception describing cluster '"+module.params['name']+"': "+str(e))
+ module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))
results = dict(changed=False)
if module.params['state'] == 'present':
- if existing and 'status' in existing and existing['status']=="ACTIVE":
- results['cluster']=existing
+ if existing and 'status' in existing and existing['status'] == "ACTIVE":
+ results['cluster'] = existing
else:
if not module.check_mode:
# doesn't exist. create it.
@@ -205,7 +206,7 @@ def main():
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['cluster'] = existing
- if 'status' in existing and existing['status']=="INACTIVE":
+ if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
@@ -213,7 +214,7 @@ def main():
results['changed'] = True
elif module.params['state'] == 'has_instances':
if not existing:
- module.fail_json(msg="Cluster '"+module.params['name']+" not found.")
+ module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
return
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
@@ -228,8 +229,8 @@ def main():
results['changed'] = True
break
time.sleep(delay)
- if count == 0 and i is repeat-1:
- module.fail_json(msg="Cluster instance count still zero after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
+ if count == 0 and i is repeat - 1:
+ module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
return
module.exit_json(**results)
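A small sketch of the instance-count polling loop whose spacing is fixed above; the delay/repeat parameters and the time.sleep(delay) pattern come from the hunk, while get_count is a stand-in for describing the cluster's registered instance count.

import time

def wait_for_instances(get_count, delay=10, repeat=10):
    for _ in range(repeat):
        if get_count() > 0:
            return True
        time.sleep(delay)
    return False

# Example: an in-memory counter that registers an instance on the second poll.
counts = iter([0, 1])
print(wait_for_instances(lambda: next(counts), delay=0, repeat=2))  # True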
diff --git a/lib/ansible/modules/cloud/amazon/ecs_service.py b/lib/ansible/modules/cloud/amazon/ecs_service.py
index 68d8905b0e..c0cb0e2d1e 100644
--- a/lib/ansible/modules/cloud/amazon/ecs_service.py
+++ b/lib/ansible/modules/cloud/amazon/ecs_service.py
@@ -308,13 +308,13 @@ class EcsServiceManager:
cluster=cluster_name,
services=[service_name])
msg = ''
- if len(response['failures'])>0:
+ if len(response['failures']) > 0:
c = self.find_in_array(response['failures'], service_name, 'arn')
msg += ", failure reason is " + c['reason']
- if c and c['reason']=='MISSING':
+ if c and c['reason'] == 'MISSING':
return None
# fall thru and look through found ones
- if len(response['services'])>0:
+ if len(response['services']) > 0:
c = self.find_in_array(response['services'], service_name)
if c:
return c
@@ -426,7 +426,7 @@ def main():
matching = False
update = False
- if existing and 'status' in existing and existing['status']=="ACTIVE":
+ if existing and 'status' in existing and existing['status'] == "ACTIVE":
if service_mgr.is_matching_service(module.params, existing):
matching = True
results['service'] = service_mgr.jsonize(existing)
@@ -446,25 +446,25 @@ def main():
if update:
# update required
response = service_mgr.update_service(module.params['name'],
- module.params['cluster'],
- module.params['task_definition'],
- loadBalancers,
- module.params['desired_count'],
- clientToken,
- role,
- deploymentConfiguration)
+ module.params['cluster'],
+ module.params['task_definition'],
+ loadBalancers,
+ module.params['desired_count'],
+ clientToken,
+ role,
+ deploymentConfiguration)
else:
# doesn't exist. create it.
response = service_mgr.create_service(module.params['name'],
- module.params['cluster'],
- module.params['task_definition'],
- loadBalancers,
- module.params['desired_count'],
- clientToken,
- role,
- deploymentConfiguration,
- module.params['placement_constraints'],
- module.params['placement_strategy'])
+ module.params['cluster'],
+ module.params['task_definition'],
+ loadBalancers,
+ module.params['desired_count'],
+ clientToken,
+ role,
+ deploymentConfiguration,
+ module.params['placement_constraints'],
+ module.params['placement_strategy'])
results['service'] = response
@@ -479,7 +479,7 @@ def main():
del existing['deployments']
del existing['events']
results['ansible_facts'] = existing
- if 'status' in existing and existing['status']=="INACTIVE":
+ if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
diff --git a/lib/ansible/modules/cloud/amazon/ecs_service_facts.py b/lib/ansible/modules/cloud/amazon/ecs_service_facts.py
index 001f0a8944..d35e54dd66 100644
--- a/lib/ansible/modules/cloud/amazon/ecs_service_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ecs_service_facts.py
@@ -170,17 +170,17 @@ class EcsServiceManager:
if cluster and cluster is not None:
fn_args['cluster'] = cluster
response = self.ecs.list_services(**fn_args)
- relevant_response = dict(services = response['serviceArns'])
+ relevant_response = dict(services=response['serviceArns'])
return relevant_response
def describe_services(self, cluster, services):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
- fn_args['services']=services.split(",")
+ fn_args['services'] = services.split(",")
response = self.ecs.describe_services(**fn_args)
- relevant_response = dict(services = map(self.extract_service_from, response['services']))
- if 'failures' in response and len(response['failures'])>0:
+ relevant_response = dict(services=map(self.extract_service_from, response['services']))
+ if 'failures' in response and len(response['failures']) > 0:
relevant_response['services_not_running'] = response['failures']
return relevant_response
@@ -199,13 +199,14 @@ class EcsServiceManager:
e['createdAt'] = str(e['createdAt'])
return service
+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- details=dict(required=False, type='bool', default=False ),
- cluster=dict(required=False, type='str' ),
- service=dict(required=False, type='str' )
+ details=dict(required=False, type='bool', default=False),
+ cluster=dict(required=False, type='str'),
+ service=dict(required=False, type='str')
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
diff --git a/lib/ansible/modules/cloud/amazon/ecs_task.py b/lib/ansible/modules/cloud/amazon/ecs_task.py
index 804890b633..aa4cb6ad90 100644
--- a/lib/ansible/modules/cloud/amazon/ecs_task.py
+++ b/lib/ansible/modules/cloud/amazon/ecs_task.py
@@ -186,7 +186,7 @@ class EcsExecManager:
family=service_name,
desiredStatus=status
)
- if len(response['taskArns'])>0:
+ if len(response['taskArns']) > 0:
for c in response['taskArns']:
if c.endswith(service_name):
return c
@@ -209,13 +209,13 @@ class EcsExecManager:
if cluster:
args['cluster'] = cluster
if task_definition:
- args['taskDefinition']=task_definition
+ args['taskDefinition'] = task_definition
if overrides:
- args['overrides']=overrides
+ args['overrides'] = overrides
if container_instances:
- args['containerInstances']=container_instances
+ args['containerInstances'] = container_instances
if startedBy:
- args['startedBy']=startedBy
+ args['startedBy'] = startedBy
response = self.ecs.start_task(**args)
# include tasks and failures
return response['tasks']
@@ -224,17 +224,18 @@ class EcsExecManager:
response = self.ecs.stop_task(cluster=cluster, task=task)
return response['task']
+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- operation=dict(required=True, choices=['run', 'start', 'stop'] ),
- cluster=dict(required=False, type='str' ), # R S P
- task_definition=dict(required=False, type='str' ), # R* S*
- overrides=dict(required=False, type='dict'), # R S
- count=dict(required=False, type='int' ), # R
- task=dict(required=False, type='str' ), # P*
- container_instances=dict(required=False, type='list'), # S*
- started_by=dict(required=False, type='str' ) # R S
+ operation=dict(required=True, choices=['run', 'start', 'stop']),
+ cluster=dict(required=False, type='str'), # R S P
+ task_definition=dict(required=False, type='str'), # R* S*
+ overrides=dict(required=False, type='dict'), # R S
+ count=dict(required=False, type='int'), # R
+ task=dict(required=False, type='str'), # P*
+ container_instances=dict(required=False, type='list'), # S*
+ started_by=dict(required=False, type='str') # R S
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
@@ -276,7 +277,7 @@ def main():
if module.params['operation'] == 'run':
if existing:
# TBD - validate the rest of the details
- results['task']=existing
+ results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.run_task(
@@ -290,7 +291,7 @@ def main():
elif module.params['operation'] == 'start':
if existing:
# TBD - validate the rest of the details
- results['task']=existing
+ results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.start_task(
@@ -304,7 +305,7 @@ def main():
elif module.params['operation'] == 'stop':
if existing:
- results['task']=existing
+ results['task'] = existing
else:
if not module.check_mode:
# it exists, so we should delete it and mark changed.
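The ecs_task hunks only add spaces around '=', but the start_task code they touch shows a common boto3 call-building pattern: optional module parameters are copied into a kwargs dict only when they are set, then expanded into the API call. A standalone sketch of that pattern, using a hypothetical helper name rather than the module's method:

def build_start_task_args(cluster=None, task_definition=None, overrides=None,
                          container_instances=None, started_by=None):
    # Only include the optional keys that were actually provided, mirroring
    # the if-guarded assignments in the hunk above.
    args = dict()
    if cluster:
        args['cluster'] = cluster
    if task_definition:
        args['taskDefinition'] = task_definition
    if overrides:
        args['overrides'] = overrides
    if container_instances:
        args['containerInstances'] = container_instances
    if started_by:
        args['startedBy'] = started_by
    return args

# e.g. ecs.start_task(**build_start_task_args(cluster='demo', task_definition='web:3'))
print(build_start_task_args(cluster='demo', task_definition='web:3'))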
diff --git a/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py b/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
index bcbffc6fdd..31f7f395dd 100644
--- a/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
+++ b/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
@@ -131,6 +131,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils._text import to_text
+
class EcsTaskManager:
"""Handles ECS Tasks"""
@@ -183,7 +184,7 @@ class EcsTaskManager:
def describe_task_definitions(self, family):
data = {
"taskDefinitionArns": [],
- "nextToken": None
+ "nextToken": None
}
def fetch():
@@ -371,7 +372,7 @@ def main():
if 'arn' in module.params and module.params['arn'] is not None:
task_to_describe = module.params['arn']
elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
- module.params['revision'] is not None:
+ module.params['revision'] is not None:
task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
else:
module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
diff --git a/lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py b/lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py
index ccf9ccdbb2..867f0a70fb 100644
--- a/lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py
+++ b/lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py
@@ -76,10 +76,10 @@ from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, get_aws_connec
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- state = dict(required=True, choices=['present', 'absent']),
- name = dict(required=True),
- description = dict(required=False),
- subnets = dict(required=False, type='list'),
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ description=dict(required=False),
+ subnets=dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@@ -87,26 +87,25 @@ def main():
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
- state = module.params.get('state')
- group_name = module.params.get('name').lower()
- group_description = module.params.get('description')
- group_subnets = module.params.get('subnets') or {}
+ state = module.params.get('state')
+ group_name = module.params.get('name').lower()
+ group_description = module.params.get('description')
+ group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
- module.fail_json(msg = str("Parameter %s required for state='present'" % required))
+ module.fail_json(msg=str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
- module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
+ module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
- module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
-
+ module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
"""Get an elasticache connection"""
try:
@@ -123,7 +122,7 @@ def main():
exists = len(matching_groups) > 0
except BotoServerError as e:
if e.error_code != 'CacheSubnetGroupNotFoundFault':
- module.fail_json(msg = e.error_message)
+ module.fail_json(msg=e.error_message)
if state == 'absent':
if exists:
@@ -139,7 +138,7 @@ def main():
except BotoServerError as e:
if e.error_message != 'No modifications were requested.':
- module.fail_json(msg = e.error_message)
+ module.fail_json(msg=e.error_message)
else:
changed = False
diff --git a/lib/ansible/modules/cloud/amazon/elb_application_lb.py b/lib/ansible/modules/cloud/amazon/elb_application_lb.py
index 5fabfd3464..2878dceb24 100644
--- a/lib/ansible/modules/cloud/amazon/elb_application_lb.py
+++ b/lib/ansible/modules/cloud/amazon/elb_application_lb.py
@@ -657,7 +657,6 @@ def compare_listeners(connection, module, current_listeners, new_listeners, purg
def compare_rules(connection, module, current_listeners, listener):
-
"""
Compare rules and return rules to add, rules to modify and rules to remove
Rules are compared based on priority
diff --git a/lib/ansible/modules/cloud/amazon/elb_classic_lb.py b/lib/ansible/modules/cloud/amazon/elb_classic_lb.py
index 81c354cb88..4db1b735f0 100644
--- a/lib/ansible/modules/cloud/amazon/elb_classic_lb.py
+++ b/lib/ansible/modules/cloud/amazon/elb_classic_lb.py
@@ -425,6 +425,7 @@ def _throttleable_operation(max_retries):
return _do_op
return _operation_wrapper
+
def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
@@ -434,6 +435,7 @@ def _get_vpc_connection(module, region, aws_connect_params):
_THROTTLING_RETRIES = 5
+
class ElbManager(object):
"""Handles ELB creation and destruction"""
@@ -579,10 +581,10 @@ class ElbManager(object):
# status of instances behind the ELB
if info['instances']:
- info['instance_health'] = [ dict(
- instance_id = instance_state.instance_id,
- reason_code = instance_state.reason_code,
- state = instance_state.state
+ info['instance_health'] = [dict(
+ instance_id=instance_state.instance_id,
+ reason_code=instance_state.reason_code,
+ state=instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
@@ -663,7 +665,7 @@ class ElbManager(object):
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
- 'description': 'ELB {0}'.format(self.name) })
+ 'description': 'ELB {0}'.format(self.name)})
for x in range(0, max_retries):
for interface in elb_interfaces:
@@ -888,13 +890,13 @@ class ElbManager(object):
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
- set(self.zones))
+ set(self.zones))
zones_to_enable = list(set(self.zones) -
- set(self.elb.availability_zones))
+ set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
- set(self.elb.availability_zones))
+ set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
@@ -962,7 +964,7 @@ class ElbManager(object):
"enabled": True,
"s3_bucket_name": self.access_logs['s3_location'],
"s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
- "emit_interval": self.access_logs.get('interval', 60),
+ "emit_interval": self.access_logs.get('interval', 60),
}
update_access_logs_config = False
@@ -1002,10 +1004,10 @@ class ElbManager(object):
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
- return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
+ return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
- getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
+ getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
@@ -1223,7 +1225,7 @@ class ElbManager(object):
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
- self.changed=True
+ self.changed = True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
@@ -1232,7 +1234,7 @@ class ElbManager(object):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
- self.changed=True
+ self.changed = True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
@@ -1275,7 +1277,7 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
- mutually_exclusive = [['security_group_ids', 'security_group_names']]
+ mutually_exclusive=[['security_group_ids', 'security_group_names']]
)
if not HAS_BOTO:
@@ -1321,7 +1323,7 @@ def main():
security_group_ids = []
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
- if subnets: # We have at least one subnet, ergo this is a VPC
+ if subnets: # We have at least one subnet, ergo this is a VPC
vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
filters = {'vpc_id': vpc_id}
@@ -1333,10 +1335,10 @@ def main():
if isinstance(group_name, string_types):
group_name = [group_name]
- group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
+ group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg = str(e))
+ module.fail_json(msg=str(e))
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
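elb_classic_lb.py also carries a _throttleable_operation(max_retries) decorator and a _THROTTLING_RETRIES = 5 constant, visible only as context around the E305 blank-line fixes. The decorator body is not part of this diff, so the following is a minimal sketch under the assumption that it retries boto calls rejected with a throttling error; it is not the module's actual implementation:

import functools
import time

_THROTTLING_RETRIES = 5  # mirrors the constant in the diff context


def _throttleable_operation(max_retries):
    # Assumption: any exception mentioning 'Throttling' is retried with
    # exponential backoff; the real module inspects boto error codes.
    def _operation_wrapper(op):
        @functools.wraps(op)
        def _do_op(*args, **kwargs):
            retry = 0
            while True:
                try:
                    return op(*args, **kwargs)
                except Exception as exc:
                    if 'Throttling' not in str(exc) or retry >= max_retries:
                        raise
                    time.sleep(min(2 ** retry, 30))
                    retry += 1
        return _do_op
    return _operation_wrapper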
diff --git a/lib/ansible/modules/cloud/amazon/elb_instance.py b/lib/ansible/modules/cloud/amazon/elb_instance.py
index dea3a00c14..c6193f5938 100644
--- a/lib/ansible/modules/cloud/amazon/elb_instance.py
+++ b/lib/ansible/modules/cloud/amazon/elb_instance.py
@@ -167,7 +167,7 @@ class ElbManager:
found = False
for lb in self.lbs:
if lb.name == lbtest:
- found=True
+ found = True
break
return found
@@ -326,7 +326,7 @@ def main():
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
- ec2_elbs={'default': None, 'required': False, 'type':'list'},
+ ec2_elbs={'default': None, 'required': False, 'type': 'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
wait_timeout={'required': False, 'default': 0, 'type': 'int'}
@@ -359,7 +359,7 @@ def main():
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
- msg="ELB %s does not exist" % elb
+ msg = "ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
diff --git a/lib/ansible/modules/cloud/amazon/execute_lambda.py b/lib/ansible/modules/cloud/amazon/execute_lambda.py
index d2530d8284..e0c9be5ac1 100644
--- a/lib/ansible/modules/cloud/amazon/execute_lambda.py
+++ b/lib/ansible/modules/cloud/amazon/execute_lambda.py
@@ -153,13 +153,13 @@ from ansible.module_utils._text import to_native
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- name = dict(),
- function_arn = dict(),
- wait = dict(default=True, type='bool'),
- tail_log = dict(default=False, type='bool'),
- dry_run = dict(default=False, type='bool'),
- version_qualifier = dict(),
- payload = dict(default={}, type='dict'),
+ name=dict(),
+ function_arn=dict(),
+ wait=dict(default=True, type='bool'),
+ tail_log=dict(default=False, type='bool'),
+ dry_run=dict(default=False, type='bool'),
+ version_qualifier=dict(),
+ payload=dict(default={}, type='dict'),
))
module = AnsibleModule(
argument_spec=argument_spec,
@@ -172,13 +172,13 @@ def main():
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
- name = module.params.get('name')
- function_arn = module.params.get('function_arn')
- await_return = module.params.get('wait')
- dry_run = module.params.get('dry_run')
- tail_log = module.params.get('tail_log')
- version_qualifier = module.params.get('version_qualifier')
- payload = module.params.get('payload')
+ name = module.params.get('name')
+ function_arn = module.params.get('function_arn')
+ await_return = module.params.get('wait')
+ dry_run = module.params.get('dry_run')
+ tail_log = module.params.get('tail_log')
+ version_qualifier = module.params.get('version_qualifier')
+ payload = module.params.get('payload')
if not HAS_BOTO3:
module.fail_json(msg='Python module "boto3" is missing, please install it')
@@ -247,7 +247,7 @@ def main():
module.fail_json(msg="Unexpected failure while invoking Lambda function",
exception=traceback.format_exc())
- results ={
+ results = {
'logs': '',
'status': response['StatusCode'],
'output': '',
@@ -276,7 +276,7 @@ def main():
# format the stacktrace sent back as an array into a multiline string
'trace': '\n'.join(
[' '.join([
- str(x) for x in line # cast line numbers to strings
+ str(x) for x in line # cast line numbers to strings
]) for line in results.get('output', {}).get('stackTrace', [])]
),
'errmsg': results['output'].get('errorMessage'),
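The last execute_lambda hunk reformats a nested comprehension that flattens a Lambda error stackTrace (a list of [file, line, function, text] rows) into a multiline string. The same transformation on made-up sample data, as a standalone sketch:

# Hypothetical error payload shaped like the stackTrace the module formats.
output = {
    'stackTrace': [
        ['/var/task/handler.py', 12, 'handler', "raise ValueError('boom')"],
        ['/var/task/util.py', 3, 'helper', 'return 1 / 0'],
    ]
}

# Cast every element (including integer line numbers) to str, join each frame
# with spaces, then join the frames with newlines -- as in the hunk above.
trace = '\n'.join(
    [' '.join([str(x) for x in line]) for line in output.get('stackTrace', [])]
)
print(trace)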
diff --git a/lib/ansible/modules/cloud/amazon/iam.py b/lib/ansible/modules/cloud/amazon/iam.py
index 0c2770815b..346ac2095e 100644
--- a/lib/ansible/modules/cloud/amazon/iam.py
+++ b/lib/ansible/modules/cloud/amazon/iam.py
@@ -240,8 +240,8 @@ def create_user(module, iam, name, pwd, path, key_state, key_count):
if key_count:
while key_count > key_qty:
keys.append(iam.create_access_key(
- user_name=name).create_access_key_response.\
- create_access_key_result.\
+ user_name=name).create_access_key_response.
+ create_access_key_result.
access_key)
key_qty += 1
else:
@@ -258,7 +258,7 @@ def delete_dependencies_first(module, iam, name):
# try to delete any keys
try:
current_keys = [ck['access_key_id'] for ck in
- iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
+ iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
for key in current_keys:
iam.delete_access_key(key, name)
changed = True
@@ -447,7 +447,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key
def set_users_groups(module, iam, name, groups, updated=None,
-new_name=None):
+ new_name=None):
""" Sets groups for a user, will purge groups not explicitly passed, while
retaining pre-existing groups that also are in the new list.
"""
@@ -526,6 +526,7 @@ def delete_group(module=None, iam=None, name=None):
changed = True
return changed, name
+
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
changed = False
try:
@@ -554,12 +555,12 @@ def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc)
if name not in role_list:
changed = True
iam_role_result = iam.create_role(name,
- assume_role_policy_document=trust_policy_doc,
- path=path).create_role_response.create_role_result.role
+ assume_role_policy_document=trust_policy_doc,
+ path=path).create_role_response.create_role_result.role
if name not in prof_list:
instance_profile_result = iam.create_instance_profile(name,
- path=path).create_instance_profile_response.create_instance_profile_result.instance_profile
+ path=path).create_instance_profile_response.create_instance_profile_result.instance_profile
iam.add_role_to_instance_profile(name, name)
else:
instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile
@@ -685,7 +686,7 @@ def main():
if iam_type == 'role' and state == 'update':
module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
- "please specify present or absent")
+ "please specify present or absent")
# check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
if trust_policy_filepath:
@@ -865,7 +866,7 @@ def main():
module.fail_json(
changed=False, msg='Role update not currently supported by boto.')
module.exit_json(changed=changed, roles=role_list, role_result=role_result,
- instance_profile_result=instance_profile_result)
+ instance_profile_result=instance_profile_result)
if __name__ == '__main__':
diff --git a/lib/ansible/modules/cloud/amazon/iam_policy.py b/lib/ansible/modules/cloud/amazon/iam_policy.py
index 06268514c0..b4c1eafe16 100644
--- a/lib/ansible/modules/cloud/amazon/iam_policy.py
+++ b/lib/ansible/modules/cloud/amazon/iam_policy.py
@@ -138,15 +138,15 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state):
changed = False
try:
current_policies = [cp for cp in iam.get_all_user_policies(name).
- list_user_policies_result.
- policy_names]
+ list_user_policies_result.
+ policy_names]
matching_policies = []
for pol in current_policies:
'''
urllib is needed here because boto returns url encoded strings instead
'''
if urllib.parse.unquote(iam.get_user_policy(name, pol).
- get_user_policy_result.policy_document) == pdoc:
+ get_user_policy_result.policy_document) == pdoc:
policy_match = True
matching_policies.append(pol)
@@ -168,8 +168,8 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state):
module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.get_all_user_policies(name).
- list_user_policies_result.
- policy_names]
+ list_user_policies_result.
+ policy_names]
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
@@ -182,8 +182,8 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
changed = False
try:
current_policies = [cp for cp in iam.list_role_policies(name).
- list_role_policies_result.
- policy_names]
+ list_role_policies_result.
+ policy_names]
except boto.exception.BotoServerError as e:
if e.error_code == "NoSuchEntity":
# Role doesn't exist so it's safe to assume the policy doesn't either
@@ -195,7 +195,7 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
matching_policies = []
for pol in current_policies:
if urllib.parse.unquote(iam.get_role_policy(name, pol).
- get_role_policy_result.policy_document) == pdoc:
+ get_role_policy_result.policy_document) == pdoc:
policy_match = True
matching_policies.append(pol)
@@ -220,8 +220,8 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
module.fail_json(msg=err.message)
updated_policies = [cp for cp in iam.list_role_policies(name).
- list_role_policies_result.
- policy_names]
+ list_role_policies_result.
+ policy_names]
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
@@ -232,19 +232,19 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
def group_action(module, iam, name, policy_name, skip, pdoc, state):
policy_match = False
changed = False
- msg=''
+ msg = ''
try:
current_policies = [cp for cp in iam.get_all_group_policies(name).
- list_group_policies_result.
- policy_names]
+ list_group_policies_result.
+ policy_names]
matching_policies = []
for pol in current_policies:
if urllib.parse.unquote(iam.get_group_policy(name, pol).
- get_group_policy_result.policy_document) == pdoc:
+ get_group_policy_result.policy_document) == pdoc:
policy_match = True
matching_policies.append(pol)
- msg=("The policy document you specified already exists "
- "under the name %s." % pol)
+ msg = ("The policy document you specified already exists "
+ "under the name %s." % pol)
if state == 'present':
# If policy document does not already exist (either it's changed
# or the policy is not present) or if we're not skipping dupes then
@@ -264,8 +264,8 @@ def group_action(module, iam, name, policy_name, skip, pdoc, state):
msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.get_all_group_policies(name).
- list_group_policies_result.
- policy_names]
+ list_group_policies_result.
+ policy_names]
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
@@ -317,7 +317,7 @@ def main():
except Exception as e:
module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
else:
- pdoc=None
+ pdoc = None
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
@@ -343,8 +343,8 @@ def main():
module.exit_json(changed=changed, role_name=name, policies=current_policies)
elif iam_type == 'group':
changed, group_name, current_policies, msg = group_action(module, iam, name,
- policy_name, skip, pdoc,
- state)
+ policy_name, skip, pdoc,
+ state)
module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)
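The iam_policy hunks re-indent comparisons that URL-decode the policy document returned by boto before comparing it with the desired document (IAM hands inline policies back URL-encoded). A tiny standalone sketch of that check, with a made-up document:

import urllib.parse


def policy_matches(encoded_current, desired):
    # Decode the stored policy before comparing, as the module does with
    # urllib.parse.unquote in the hunks above.
    return urllib.parse.unquote(encoded_current) == desired


current = '%7B%22Version%22%3A%20%222012-10-17%22%7D'
print(policy_matches(current, '{"Version": "2012-10-17"}'))  # True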
diff --git a/lib/ansible/modules/cloud/amazon/rds_subnet_group.py b/lib/ansible/modules/cloud/amazon/rds_subnet_group.py
index 3b28468292..e415319f00 100644
--- a/lib/ansible/modules/cloud/amazon/rds_subnet_group.py
+++ b/lib/ansible/modules/cloud/amazon/rds_subnet_group.py
@@ -80,10 +80,10 @@ from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- state = dict(required=True, choices=['present', 'absent']),
- name = dict(required=True),
- description = dict(required=False),
- subnets = dict(required=False, type='list'),
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True),
+ description=dict(required=False),
+ subnets=dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@@ -91,30 +91,30 @@ def main():
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
- state = module.params.get('state')
- group_name = module.params.get('name').lower()
- group_description = module.params.get('description')
- group_subnets = module.params.get('subnets') or {}
+ state = module.params.get('state')
+ group_name = module.params.get('name').lower()
+ group_description = module.params.get('description')
+ group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
- module.fail_json(msg = str("Parameter %s required for state='present'" % required))
+ module.fail_json(msg=str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
- module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
+ module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
- module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
+ module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
try:
conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
except BotoServerError as e:
- module.fail_json(msg = e.error_message)
+ module.fail_json(msg=e.error_message)
try:
changed = False
@@ -125,7 +125,7 @@ def main():
exists = len(matching_groups) > 0
except BotoServerError as e:
if e.error_code != 'DBSubnetGroupNotFoundFault':
- module.fail_json(msg = e.error_message)
+ module.fail_json(msg=e.error_message)
if state == 'absent':
if exists:
@@ -145,7 +145,7 @@ def main():
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError as e:
- module.fail_json(msg = e.error_message)
+ module.fail_json(msg=e.error_message)
module.exit_json(changed=changed)
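rds_subnet_group.py and elasticache_subnet_group.py above share the same state-driven validation: description and subnets are required for state='present' and rejected for state='absent'. A standalone sketch of that check, raising a plain exception in place of module.fail_json and taking a hypothetical params dict:

def validate_subnet_group_params(params):
    if params.get('state') == 'present':
        for required in ['name', 'description', 'subnets']:
            if not params.get(required):
                raise ValueError("Parameter %s required for state='present'" % required)
    else:
        for not_allowed in ['description', 'subnets']:
            if params.get(not_allowed):
                raise ValueError("Parameter %s not allowed for state='absent'" % not_allowed)


# Example usage with hypothetical values:
validate_subnet_group_params({'state': 'present', 'name': 'db-subnets',
                              'description': 'demo', 'subnets': ['subnet-1234']})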
diff --git a/lib/ansible/modules/cloud/amazon/redshift.py b/lib/ansible/modules/cloud/amazon/redshift.py
index f035d17505..6d7a942752 100644
--- a/lib/ansible/modules/cloud/amazon/redshift.py
+++ b/lib/ansible/modules/cloud/amazon/redshift.py
@@ -237,15 +237,15 @@ from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec
def _collect_facts(resource):
"""Transfrom cluster information to dict."""
facts = {
- 'identifier' : resource['ClusterIdentifier'],
- 'create_time' : resource['ClusterCreateTime'],
- 'status' : resource['ClusterStatus'],
- 'username' : resource['MasterUsername'],
- 'db_name' : resource['DBName'],
- 'availability_zone' : resource['AvailabilityZone'],
+ 'identifier': resource['ClusterIdentifier'],
+ 'create_time': resource['ClusterCreateTime'],
+ 'status': resource['ClusterStatus'],
+ 'username': resource['MasterUsername'],
+ 'db_name': resource['DBName'],
+ 'availability_zone': resource['AvailabilityZone'],
'maintenance_window': resource['PreferredMaintenanceWindow'],
- 'url' : resource['Endpoint']['Address'],
- 'port' : resource['Endpoint']['Port']
+ 'url': resource['Endpoint']['Address'],
+ 'port': resource['Endpoint']['Port']
}
for node in resource['ClusterNodes']:
@@ -267,11 +267,11 @@ def create_cluster(module, redshift):
Returns:
"""
- identifier = module.params.get('identifier')
- node_type = module.params.get('node_type')
- username = module.params.get('username')
- password = module.params.get('password')
- wait = module.params.get('wait')
+ identifier = module.params.get('identifier')
+ node_type = module.params.get('node_type')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
changed = True
@@ -286,7 +286,7 @@ def create_cluster(module, redshift):
'number_of_nodes', 'publicly_accessible',
'encrypted', 'elastic_ip'):
if p in module.params:
- params[ p ] = module.params.get( p )
+ params[p] = module.params.get(p)
try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
@@ -310,7 +310,7 @@ def create_cluster(module, redshift):
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
- module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
+ module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
@@ -368,7 +368,7 @@ def delete_cluster(module, redshift):
while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
time.sleep(5)
if wait_timeout <= time.time():
- module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
+ module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
@@ -386,8 +386,8 @@ def modify_cluster(module, redshift):
redshift: authenticated redshift connection object
"""
- identifier = module.params.get('identifier')
- wait = module.params.get('wait')
+ identifier = module.params.get('identifier')
+ wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
# Package up the optional parameters
@@ -422,7 +422,7 @@ def modify_cluster(module, redshift):
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
- module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
+ module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
@@ -436,34 +436,34 @@ def modify_cluster(module, redshift):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
- identifier = dict(required=True),
- node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large',
- 'dc2.large','dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
- 'dw2.8xlarge'], required=False),
- username = dict(required=False),
- password = dict(no_log=True, required=False),
- db_name = dict(require=False),
- cluster_type = dict(choices=['multi-node', 'single-node', ], default='single-node'),
- cluster_security_groups = dict(aliases=['security_groups'], type='list'),
- vpc_security_group_ids = dict(aliases=['vpc_security_groups'], type='list'),
- skip_final_cluster_snapshot = dict(aliases=['skip_final_snapshot'], type='bool', default=False),
- final_cluster_snapshot_identifier = dict(aliases=['final_snapshot_id'], required=False),
- cluster_subnet_group_name = dict(aliases=['subnet']),
- availability_zone = dict(aliases=['aws_zone', 'zone']),
- preferred_maintenance_window = dict(aliases=['maintance_window', 'maint_window']),
- cluster_parameter_group_name = dict(aliases=['param_group_name']),
- automated_snapshot_retention_period = dict(aliases=['retention_period']),
- port = dict(type='int'),
- cluster_version = dict(aliases=['version'], choices=['1.0']),
- allow_version_upgrade = dict(aliases=['version_upgrade'], type='bool', default=True),
- number_of_nodes = dict(type='int'),
- publicly_accessible = dict(type='bool', default=False),
- encrypted = dict(type='bool', default=False),
- elastic_ip = dict(required=False),
- new_cluster_identifier = dict(aliases=['new_identifier']),
- wait = dict(type='bool', default=False),
- wait_timeout = dict(type='int', default=300),
+ command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
+ identifier=dict(required=True),
+ node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large',
+ 'dc2.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
+ 'dw2.8xlarge'], required=False),
+ username=dict(required=False),
+ password=dict(no_log=True, required=False),
+ db_name=dict(require=False),
+ cluster_type=dict(choices=['multi-node', 'single-node', ], default='single-node'),
+ cluster_security_groups=dict(aliases=['security_groups'], type='list'),
+ vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
+ skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], type='bool', default=False),
+ final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
+ cluster_subnet_group_name=dict(aliases=['subnet']),
+ availability_zone=dict(aliases=['aws_zone', 'zone']),
+ preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
+ cluster_parameter_group_name=dict(aliases=['param_group_name']),
+ automated_snapshot_retention_period=dict(aliases=['retention_period']),
+ port=dict(type='int'),
+ cluster_version=dict(aliases=['version'], choices=['1.0']),
+ allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
+ number_of_nodes=dict(type='int'),
+ publicly_accessible=dict(type='bool', default=False),
+ encrypted=dict(type='bool', default=False),
+ elastic_ip=dict(required=False),
+ new_cluster_identifier=dict(aliases=['new_identifier']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
))
required_if = [
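Beyond spacing, the redshift hunks touch two recurring patterns: packing optional module parameters into a kwargs dict (params[p] = module.params.get(p)) and polling a cluster every five seconds until it reaches a target status or wait_timeout expires. A hedged sketch of the polling part, with a stand-in describe callable instead of the real describe_clusters call:

import time


def wait_for_status(describe, identifier, target_status, wait_timeout):
    # `describe` is assumed to return a dict containing 'ClusterStatus';
    # this mirrors the wait loops in the hunks, restructured around an
    # absolute deadline.
    deadline = time.time() + wait_timeout
    resource = describe(identifier)
    while deadline > time.time() and resource['ClusterStatus'] != target_status:
        time.sleep(5)
        resource = describe(identifier)
    if resource['ClusterStatus'] != target_status:
        raise TimeoutError("Timeout waiting for resource %s" % identifier)
    return resource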
diff --git a/lib/ansible/modules/cloud/amazon/route53_health_check.py b/lib/ansible/modules/cloud/amazon/route53_health_check.py
index ea9949a9b0..babfde324b 100644
--- a/lib/ansible/modules/cloud/amazon/route53_health_check.py
+++ b/lib/ansible/modules/cloud/amazon/route53_health_check.py
@@ -166,6 +166,7 @@ def find_health_check(conn, wanted):
return check
return None
+
def to_health_check(config):
return HealthCheck(
config.get('IPAddress'),
@@ -178,6 +179,7 @@ def to_health_check(config):
failure_threshold=int(config.get('FailureThreshold')),
)
+
def health_check_diff(a, b):
a = a.__dict__
b = b.__dict__
@@ -189,6 +191,7 @@ def health_check_diff(a, b):
diff[key] = b.get(key)
return diff
+
def to_template_params(health_check):
params = {
'ip_addr_part': '',
@@ -240,7 +243,8 @@ UPDATEHCXMLBody = """
</UpdateHealthCheckRequest>
"""
-def create_health_check(conn, health_check, caller_ref = None):
+
+def create_health_check(conn, health_check, caller_ref=None):
if caller_ref is None:
caller_ref = str(uuid.uuid4())
uri = '/%s/healthcheck' % conn.Version
@@ -259,6 +263,7 @@ def create_health_check(conn, health_check, caller_ref = None):
else:
raise exception.DNSServerError(response.status, response.reason, body)
+
def update_health_check(conn, health_check_id, health_check_version, health_check):
uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
params = to_template_params(health_check)
@@ -279,18 +284,19 @@ def update_health_check(conn, health_check_id, health_check_version, health_chec
h.parse(body)
return e
+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- state = dict(choices=['present', 'absent'], default='present'),
- ip_address = dict(),
- port = dict(type='int'),
- type = dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
- resource_path = dict(),
- fqdn = dict(),
- string_match = dict(),
- request_interval = dict(type='int', choices=[10, 30], default=30),
- failure_threshold = dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ip_address=dict(),
+ port=dict(type='int'),
+ type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
+ resource_path=dict(),
+ fqdn=dict(),
+ string_match=dict(),
+ request_interval=dict(type='int', choices=[10, 30], default=30),
+ failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@@ -298,15 +304,15 @@ def main():
if not HAS_BOTO:
module.fail_json(msg='boto 2.27.0+ required for this module')
- state_in = module.params.get('state')
- ip_addr_in = module.params.get('ip_address')
- port_in = module.params.get('port')
- type_in = module.params.get('type')
- resource_path_in = module.params.get('resource_path')
- fqdn_in = module.params.get('fqdn')
- string_match_in = module.params.get('string_match')
- request_interval_in = module.params.get('request_interval')
- failure_threshold_in = module.params.get('failure_threshold')
+ state_in = module.params.get('state')
+ ip_addr_in = module.params.get('ip_address')
+ port_in = module.params.get('port')
+ type_in = module.params.get('type')
+ resource_path_in = module.params.get('resource_path')
+ fqdn_in = module.params.get('fqdn')
+ string_match_in = module.params.get('string_match')
+ request_interval_in = module.params.get('request_interval')
+ failure_threshold_in = module.params.get('failure_threshold')
if ip_addr_in is None and fqdn_in is None:
module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")
@@ -334,7 +340,7 @@ def main():
try:
conn = Route53Connection(**aws_connect_kwargs)
except boto.exception.BotoServerError as e:
- module.fail_json(msg = e.error_message)
+ module.fail_json(msg=e.error_message)
changed = False
action = None
@@ -362,7 +368,7 @@ def main():
conn.delete_health_check(check_id)
changed = True
else:
- module.fail_json(msg = "Logic Error: Unknown state")
+ module.fail_json(msg="Logic Error: Unknown state")
module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
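route53_health_check's health_check_diff(a, b), partially visible above, compares two HealthCheck objects through their __dict__ and returns only the attributes that changed. Roughly the same idea on a hypothetical stand-in class (the real HealthCheck comes from boto and has a different constructor):

class HealthCheck(object):
    # Hypothetical stand-in with a few of the fields the module compares.
    def __init__(self, ip_addr, port, hc_type, resource_path):
        self.ip_addr = ip_addr
        self.port = port
        self.hc_type = hc_type
        self.resource_path = resource_path


def health_check_diff(a, b):
    # Compare the attribute dicts and report b's value for every changed key.
    a = a.__dict__
    b = b.__dict__
    diff = {}
    for key in a:
        if a[key] != b.get(key):
            diff[key] = b.get(key)
    return diff


old = HealthCheck('192.0.2.10', 80, 'HTTP', '/health')
new = HealthCheck('192.0.2.10', 8080, 'HTTP', '/status')
print(health_check_diff(old, new))  # {'port': 8080, 'resource_path': '/status'}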
diff --git a/lib/ansible/modules/cloud/amazon/s3_lifecycle.py b/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
index 3cd87c6a12..04f70b3924 100644
--- a/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
+++ b/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
@@ -256,6 +256,7 @@ def create_lifecycle_rule(connection, module):
module.exit_json(changed=changed)
+
def compare_rule(rule_a, rule_b):
# Copy objects
@@ -364,27 +365,27 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- name = dict(required=True, type='str'),
- expiration_days = dict(default=None, required=False, type='int'),
- expiration_date = dict(default=None, required=False, type='str'),
- prefix = dict(default=None, required=False),
- requester_pays = dict(default='no', type='bool'),
- rule_id = dict(required=False, type='str'),
- state = dict(default='present', choices=['present', 'absent']),
- status = dict(default='enabled', choices=['enabled', 'disabled']),
- storage_class = dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
- transition_days = dict(default=None, required=False, type='int'),
- transition_date = dict(default=None, required=False, type='str')
+ name=dict(required=True, type='str'),
+ expiration_days=dict(default=None, required=False, type='int'),
+ expiration_date=dict(default=None, required=False, type='str'),
+ prefix=dict(default=None, required=False),
+ requester_pays=dict(default='no', type='bool'),
+ rule_id=dict(required=False, type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ storage_class=dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
+ transition_days=dict(default=None, required=False, type='int'),
+ transition_date=dict(default=None, required=False, type='str')
)
)
module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive = [
- [ 'expiration_days', 'expiration_date' ],
- [ 'expiration_days', 'transition_date' ],
- [ 'transition_days', 'transition_date' ],
- [ 'transition_days', 'expiration_date' ]
- ]
+ mutually_exclusive=[
+ ['expiration_days', 'expiration_date'],
+ ['expiration_days', 'transition_date'],
+ ['transition_days', 'transition_date'],
+ ['transition_days', 'expiration_date']
+ ]
)
if not HAS_BOTO:
@@ -428,7 +429,7 @@ def main():
except ValueError as e:
module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
- boto_required_version = (2,40,0)
+ boto_required_version = (2, 40, 0)
if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version:
module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0")
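The last s3_lifecycle hunk just adds spaces inside (2, 40, 0), the boto version floor for the standard_ia storage class; the module builds an int tuple from boto.__version__ and compares it against that floor. The same gate as a standalone sketch, without the boto import:

def supports_standard_ia(version_string, required=(2, 40, 0)):
    # Split the dotted version into an int tuple and compare lexicographically,
    # as the module does before allowing storage_class=standard_ia.
    return tuple(map(int, version_string.split("."))) >= required


print(supports_standard_ia("2.39.0"))  # False -> the module would fail_json
print(supports_standard_ia("2.48.0"))  # True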
diff --git a/lib/ansible/modules/cloud/amazon/s3_logging.py b/lib/ansible/modules/cloud/amazon/s3_logging.py
index 3fab08e628..429915ab2c 100644
--- a/lib/ansible/modules/cloud/amazon/s3_logging.py
+++ b/lib/ansible/modules/cloud/amazon/s3_logging.py
@@ -137,10 +137,10 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- name = dict(required=True),
- target_bucket = dict(required=False, default=None),
- target_prefix = dict(required=False, default=""),
- state = dict(required=False, default='present', choices=['present', 'absent'])
+ name=dict(required=True),
+ target_bucket=dict(required=False, default=None),
+ target_prefix=dict(required=False, default=""),
+ state=dict(required=False, default='present', choices=['present', 'absent'])
)
)
diff --git a/lib/ansible/modules/cloud/amazon/s3_sync.py b/lib/ansible/modules/cloud/amazon/s3_sync.py
index 54274e88af..9bebc19455 100644
--- a/lib/ansible/modules/cloud/amazon/s3_sync.py
+++ b/lib/ansible/modules/cloud/amazon/s3_sync.py
@@ -256,7 +256,6 @@ DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024
def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
-
"""
calculates a multipart upload etag for amazon s3
diff --git a/lib/ansible/modules/cloud/amazon/s3_website.py b/lib/ansible/modules/cloud/amazon/s3_website.py
index 43658181f1..5cb0dc9b9f 100644
--- a/lib/ansible/modules/cloud/amazon/s3_website.py
+++ b/lib/ansible/modules/cloud/amazon/s3_website.py
@@ -180,10 +180,10 @@ def _create_website_configuration(suffix, error_key, redirect_all_requests):
website_configuration = {}
if error_key is not None:
- website_configuration['ErrorDocument'] = { 'Key': error_key }
+ website_configuration['ErrorDocument'] = {'Key': error_key}
if suffix is not None:
- website_configuration['IndexDocument'] = { 'Suffix': suffix }
+ website_configuration['IndexDocument'] = {'Suffix': suffix}
if redirect_all_requests is not None:
website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
@@ -288,10 +288,10 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
- mutually_exclusive = [
+ mutually_exclusive=[
['redirect_all_requests', 'suffix'],
['redirect_all_requests', 'error_key']
- ])
+ ])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
diff --git a/lib/ansible/modules/cloud/amazon/sns_topic.py b/lib/ansible/modules/cloud/amazon/sns_topic.py
index e0e3ae6a52..fe7c1115ee 100644
--- a/lib/ansible/modules/cloud/amazon/sns_topic.py
+++ b/lib/ansible/modules/cloud/amazon/sns_topic.py
@@ -176,7 +176,7 @@ class SnsTopicManager(object):
def _get_boto_connection(self):
try:
return connect_to_aws(boto.sns, self.region,
- **self.aws_connect_params)
+ **self.aws_connect_params)
except BotoServerError as err:
self.module.fail_json(msg=err.message)
@@ -194,7 +194,6 @@ class SnsTopicManager(object):
break
return [t['TopicArn'] for t in topics]
-
def _arn_topic_lookup(self):
# topic names cannot have colons, so this captures the full topic name
all_topics = self._get_all_topics()
@@ -203,7 +202,6 @@ class SnsTopicManager(object):
if topic.endswith(lookup_topic):
return topic
-
def _create_topic(self):
self.changed = True
self.topic_created = True
@@ -214,57 +212,51 @@ class SnsTopicManager(object):
time.sleep(3)
self.arn_topic = self._arn_topic_lookup()
-
def _set_topic_attrs(self):
- topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
- ['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
- ['Attributes']
+ topic_attributes = self.connection.get_topic_attributes(self.arn_topic)['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes']
if self.display_name and self.display_name != topic_attributes['DisplayName']:
self.changed = True
self.attributes_set.append('display_name')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'DisplayName',
- self.display_name)
+ self.display_name)
if self.policy and self.policy != json.loads(topic_attributes['Policy']):
self.changed = True
self.attributes_set.append('policy')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'Policy',
- json.dumps(self.policy))
+ json.dumps(self.policy))
- if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or \
- self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
+ if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
+ self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
self.changed = True
self.attributes_set.append('delivery_policy')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
- json.dumps(self.delivery_policy))
-
+ json.dumps(self.delivery_policy))
def _canonicalize_endpoint(self, protocol, endpoint):
if protocol == 'sms':
return re.sub('[^0-9]*', '', endpoint)
return endpoint
-
def _get_topic_subs(self):
next_token = None
while True:
response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
- self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \
- ['ListSubscriptionsByTopicResult']['Subscriptions'])
- next_token = response['ListSubscriptionsByTopicResponse'] \
- ['ListSubscriptionsByTopicResult']['NextToken']
+ self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse']
+ ['ListSubscriptionsByTopicResult']['Subscriptions'])
+ next_token = response['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['NextToken']
if not next_token:
break
def _set_topic_subs(self):
subscriptions_existing_list = []
desired_subscriptions = [(sub['protocol'],
- self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
- self.subscriptions]
+ self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
+ self.subscriptions]
if self.subscriptions_existing:
for sub in self.subscriptions_existing:
@@ -284,7 +276,6 @@ class SnsTopicManager(object):
if not self.check_mode:
self.connection.subscribe(self.arn_topic, protocol, endpoint)
-
def _delete_subscriptions(self):
# NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
# https://forums.aws.amazon.com/thread.jspa?threadID=85993
@@ -295,14 +286,12 @@ class SnsTopicManager(object):
if not self.check_mode:
self.connection.unsubscribe(sub['SubscriptionArn'])
-
def _delete_topic(self):
self.topic_deleted = True
self.changed = True
if not self.check_mode:
self.connection.delete_topic(self.arn_topic)
-
def ensure_ok(self):
self.arn_topic = self._arn_topic_lookup()
if not self.arn_topic:
@@ -319,7 +308,6 @@ class SnsTopicManager(object):
self._delete_subscriptions()
self._delete_topic()
-
def get_info(self):
info = {
'name': self.name,
@@ -341,14 +329,13 @@ class SnsTopicManager(object):
return info
-
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present',
- 'absent']),
+ 'absent']),
display_name=dict(type='str', required=False),
policy=dict(type='dict', required=False),
delivery_policy=dict(type='dict', required=False),
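sns_topic's _canonicalize_endpoint, shown in context above, strips everything but digits from sms endpoints so the desired and existing subscription lists compare equal regardless of phone-number formatting. A small standalone sketch:

import re


def canonicalize_endpoint(protocol, endpoint):
    # sms endpoints are reduced to digits only; other protocols pass through,
    # mirroring the helper in the diff context.
    if protocol == 'sms':
        return re.sub('[^0-9]*', '', endpoint)
    return endpoint


print(canonicalize_endpoint('sms', '+1 (555) 010-0042'))  # '15550100042'
print(canonicalize_endpoint('email', 'ops@example.com'))  # unchanged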
diff --git a/lib/ansible/modules/cloud/amazon/sts_assume_role.py b/lib/ansible/modules/cloud/amazon/sts_assume_role.py
index 33099de864..f4bf58bea4 100644
--- a/lib/ansible/modules/cloud/amazon/sts_assume_role.py
+++ b/lib/ansible/modules/cloud/amazon/sts_assume_role.py
@@ -113,17 +113,18 @@ def assume_role_policy(connection, module):
module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__)
+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- role_arn = dict(required=True, default=None),
- role_session_name = dict(required=True, default=None),
- duration_seconds = dict(required=False, default=None, type='int'),
- external_id = dict(required=False, default=None),
- policy = dict(required=False, default=None),
- mfa_serial_number = dict(required=False, default=None),
- mfa_token = dict(required=False, default=None)
+ role_arn=dict(required=True, default=None),
+ role_session_name=dict(required=True, default=None),
+ duration_seconds=dict(required=False, default=None, type='int'),
+ external_id=dict(required=False, default=None),
+ policy=dict(required=False, default=None),
+ mfa_serial_number=dict(required=False, default=None),
+ mfa_token=dict(required=False, default=None)
)
)
diff --git a/lib/ansible/modules/cloud/amazon/sts_session_token.py b/lib/ansible/modules/cloud/amazon/sts_session_token.py
index 3d766683d7..12eda6e857 100644
--- a/lib/ansible/modules/cloud/amazon/sts_session_token.py
+++ b/lib/ansible/modules/cloud/amazon/sts_session_token.py
@@ -108,6 +108,7 @@ def normalize_credentials(credentials):
'expiration': expiration
}
+
def get_session_token(connection, module):
duration_seconds = module.params.get('duration_seconds')
mfa_serial_number = module.params.get('mfa_serial_number')
@@ -131,13 +132,14 @@ def get_session_token(connection, module):
credentials = normalize_credentials(response.get('Credentials', {}))
module.exit_json(changed=changed, sts_creds=credentials)
+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
- duration_seconds = dict(required=False, default=None, type='int'),
- mfa_serial_number = dict(required=False, default=None),
- mfa_token = dict(required=False, default=None)
+ duration_seconds=dict(required=False, default=None, type='int'),
+ mfa_serial_number=dict(required=False, default=None),
+ mfa_token=dict(required=False, default=None)
)
)
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_deployment.py b/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
index 00d8e0a97d..81df15ca59 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
@@ -512,7 +512,7 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
if self.wait_for_deployment_completion:
deployment_result = self.get_poller_result(result)
while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
- 'Succeeded']:
+ 'Succeeded']:
time.sleep(self.wait_for_deployment_polling_period)
deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name)
except CloudError as exc:
@@ -535,7 +535,7 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
"""
try:
result = self.rm_client.resource_groups.delete(self.resource_group_name)
- result.wait() # Blocking wait till the delete is finished
+ result.wait() # Blocking wait till the delete is finished
except CloudError as e:
if e.status_code == 404 or e.status_code == 204:
return
@@ -569,7 +569,7 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
nested_deployment)
except CloudError as exc:
self.fail("List nested deployment operations failed with status code: %s and message: %s" %
- (exc.status_code, exc.message))
+ (exc.status_code, exc.message))
new_nested_operations = self._get_failed_nested_operations(nested_operations)
new_operations += new_nested_operations
return new_operations
@@ -642,10 +642,10 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
def _get_ip_dict(self, ip):
ip_dict = dict(name=ip.name,
- id=ip.id,
- public_ip=ip.ip_address,
- public_ip_allocation_method=str(ip.public_ip_allocation_method)
- )
+ id=ip.id,
+ public_ip=ip.ip_address,
+ public_ip_allocation_method=str(ip.public_ip_allocation_method)
+ )
if ip.dns_settings:
ip_dict['dns_settings'] = {
'domain_name_label': ip.dns_settings.domain_name_label,
@@ -657,9 +657,9 @@ class AzureRMDeploymentManager(AzureRMModuleBase):
return [self.network_client.public_ip_addresses.get(public_ip_id.split('/')[4], public_ip_id.split('/')[-1])
for nic_obj in (self.network_client.network_interfaces.get(self.resource_group_name,
nic['dep'].resource_name) for nic in nics)
- for public_ip_id in [ip_conf_instance.public_ip_address.id
- for ip_conf_instance in nic_obj.ip_configurations
- if ip_conf_instance.public_ip_address]]
+ for public_ip_id in [ip_conf_instance.public_ip_address.id
+ for ip_conf_instance in nic_obj.ip_configurations
+ if ip_conf_instance.public_ip_address]]
def main():
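The first azure_rm_deployment hunk re-aligns a polling loop that sleeps until the deployment's provisioning_state reaches one of the terminal values ('Canceled', 'Failed', 'Deleted', 'Succeeded'). A hedged sketch of that loop with a stand-in poll callable in place of rm_client.deployments.get:

import time

TERMINAL_STATES = ['Canceled', 'Failed', 'Deleted', 'Succeeded']


def wait_for_deployment(poll, polling_period=5):
    # `poll` is assumed to return an object whose .properties may still be
    # None early on, and whose .properties.provisioning_state eventually
    # becomes one of the terminal values -- as in the loop above.
    result = poll()
    while result.properties is None or result.properties.provisioning_state not in TERMINAL_STATES:
        time.sleep(polling_period)
        result = poll()
    return result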
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_managed_disk.py b/lib/ansible/modules/cloud/azure/azure_rm_managed_disk.py
index 81213908cb..13f027d176 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_managed_disk.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_managed_disk.py
@@ -154,6 +154,7 @@ def managed_disk_to_dict(managed_disk):
class AzureRMManagedDisk(AzureRMModuleBase):
"""Configuration class for an Azure RM Managed Disk resource"""
+
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py b/lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py
index 9dc666a55c..942d6b0082 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py
@@ -219,7 +219,7 @@ state:
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import NetworkInterface, NetworkInterfaceIPConfiguration, Subnet, \
- PublicIPAddress, NetworkSecurityGroup
+ PublicIPAddress, NetworkSecurityGroup
except ImportError:
# This is handled in azure_rm_common
pass
@@ -442,7 +442,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
if not pip and self.public_ip:
# create a default public_ip
pip = self.create_default_pip(self.resource_group, self.location, self.name,
- self.public_ip_allocation_method)
+ self.public_ip_allocation_method)
nic = NetworkInterface(
location=self.location,
@@ -475,8 +475,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
tags=results['tags'],
ip_configurations=[
NetworkInterfaceIPConfiguration(
- private_ip_allocation_method=
- results['ip_configuration']['private_ip_allocation_method']
+ private_ip_allocation_method=results['ip_configuration']['private_ip_allocation_method']
)
]
)
@@ -496,7 +495,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
id=pip.id,
location=pip.location,
resource_guid=pip.resource_guid)
- #name=pip.name,
+ # name=pip.name,
if results['network_security_group'].get('id'):
nsg = self.get_security_group(results['network_security_group']['name'])
@@ -549,8 +548,8 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
subnet = self.network_client.subnets.get(self.resource_group, vnet_name, subnet_name)
except Exception as exc:
self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format(subnet_name,
- vnet_name,
- str(exc)))
+ vnet_name,
+ str(exc)))
return subnet
def get_security_group(self, name):
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py b/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py
index cfaa21dea0..2d5a8d7039 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py
@@ -193,7 +193,7 @@ class AzureRMPublicIPAddress(AzureRMModuleBase):
if self.domain_name != results['dns_settings'].get('domain_name_label'):
self.log('CHANGED: domain_name_label')
changed = True
- results['dns_settings']['domain_name_label'] =self.domain_name
+ results['dns_settings']['domain_name_label'] = self.domain_name
if self.allocation_method != results['public_ip_allocation_method']:
self.log("CHANGED: allocation_method")
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_facts.py b/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_facts.py
index f427c8a562..49b2743e7c 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_facts.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_facts.py
@@ -183,7 +183,6 @@ class AzureRMPublicIPFacts(AzureRMModuleBase):
return results
-
def main():
AzureRMPublicIPFacts()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py b/lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py
index 4ab2fcc25d..ff3948adef 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py
@@ -155,7 +155,7 @@ try:
from azure.common import AzureMissingResourceHttpError
from azure.mgmt.storage.models import ProvisioningState, SkuName, SkuTier, Kind
from azure.mgmt.storage.models import StorageAccountUpdateParameters, CustomDomain, \
- StorageAccountCreateParameters, Sku
+ StorageAccountCreateParameters, Sku
except ImportError:
# This is handled in azure_rm_common
pass
@@ -226,7 +226,7 @@ class AzureRMStorageAccount(AzureRMModuleBase):
self.account_dict = self.get_account()
if self.state == 'present' and self.account_dict and \
- self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE :
+ self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE:
self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state "
"to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE))
@@ -280,7 +280,7 @@ class AzureRMStorageAccount(AzureRMModuleBase):
resource_group=self.resource_group,
type=account_obj.type,
access_tier=(account_obj.access_tier.value
- if account_obj.access_tier is not None else None),
+ if account_obj.access_tier is not None else None),
sku_tier=account_obj.sku.tier.value,
sku_name=account_obj.sku.name.value,
provisioning_state=account_obj.provisioning_state.value,
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_subnet.py b/lib/ansible/modules/cloud/azure/azure_rm_subnet.py
index 507ba2de9f..7e35e48351 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_subnet.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_subnet.py
@@ -132,7 +132,6 @@ except ImportError:
pass
-
def subnet_to_dict(subnet):
result = dict(
id=subnet.id,
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
index 23f3d7f871..9ac237ed5f 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
@@ -598,14 +598,14 @@ try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
from azure.mgmt.compute.models import NetworkInterfaceReference, \
- VirtualMachine, HardwareProfile, \
- StorageProfile, OSProfile, OSDisk, DataDisk, \
- VirtualHardDisk, ManagedDiskParameters, \
- ImageReference, NetworkProfile, LinuxConfiguration, \
- SshConfiguration, SshPublicKey, VirtualMachineSizeTypes, \
- DiskCreateOptionTypes, Plan, SubResource
+ VirtualMachine, HardwareProfile, \
+ StorageProfile, OSProfile, OSDisk, DataDisk, \
+ VirtualHardDisk, ManagedDiskParameters, \
+ ImageReference, NetworkProfile, LinuxConfiguration, \
+ SshConfiguration, SshPublicKey, VirtualMachineSizeTypes, \
+ DiskCreateOptionTypes, Plan, SubResource
from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, NetworkInterface, \
- NetworkInterfaceIPConfiguration, Subnet
+ NetworkInterfaceIPConfiguration, Subnet
from azure.mgmt.storage.models import StorageAccountCreateParameters, Sku
from azure.mgmt.storage.models import Kind, SkuTier, SkuName
except ImportError:
@@ -659,7 +659,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
open_ports=dict(type='list'),
network_interface_names=dict(type='list', aliases=['network_interfaces']),
remove_on_absent=dict(type='list', default=['all']),
- virtual_network_resource_group=dict(type = 'str'),
+ virtual_network_resource_group=dict(type='str'),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
subnet_name=dict(type='str', aliases=['subnet']),
allocated=dict(type='bool', default=True),
@@ -1297,7 +1297,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
nic_names = []
pip_names = []
- if self.remove_on_absent.intersection(set(['all','virtual_storage'])):
+ if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
# store the attached vhd info so we can nuke it after the VM is gone
if(vm.storage_profile.os_disk.managed_disk):
self.log('Storing managed disk ID for deletion')
@@ -1319,7 +1319,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
self.log("Managed disk IDs to delete: {0}".format(', '.join(managed_disk_ids)))
self.results['deleted_managed_disk_ids'] = managed_disk_ids
- if self.remove_on_absent.intersection(set(['all','network_interfaces'])):
+ if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
# store the attached nic info so we can nuke them after the VM is gone
self.log('Storing NIC names for deletion.')
for interface in vm.network_profile.network_interfaces:
@@ -1327,7 +1327,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
nic_names.append(id_dict['networkInterfaces'])
self.log('NIC names to delete {0}'.format(', '.join(nic_names)))
self.results['deleted_network_interfaces'] = nic_names
- if self.remove_on_absent.intersection(set(['all','public_ips'])):
+ if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
# also store each nic's attached public IPs and delete after the NIC is gone
for name in nic_names:
nic = self.get_network_interface(name)
@@ -1349,18 +1349,18 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
# TODO: parallelize nic, vhd, and public ip deletions with begin_deleting
# TODO: best-effort to keep deleting other linked resources if we encounter an error
- if self.remove_on_absent.intersection(set(['all','virtual_storage'])):
+ if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
self.log('Deleting VHDs')
self.delete_vm_storage(vhd_uris)
self.log('Deleting managed disks')
self.delete_managed_disks(managed_disk_ids)
- if self.remove_on_absent.intersection(set(['all','network_interfaces'])):
+ if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
self.log('Deleting network interfaces')
for name in nic_names:
self.delete_nic(name)
- if self.remove_on_absent.intersection(set(['all','public_ips'])):
+ if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
self.log('Deleting public IPs')
for name in pip_names:
self.delete_pip(name)
@@ -1461,6 +1461,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
return ImageReference(id=vm_image.id)
self.fail("Error could not find image with name {0}".format(name))
+
def get_availability_set(self, resource_group, name):
try:
return self.compute_client.availability_sets.get(resource_group, name)
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py
index 674743d222..07c92180e0 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py
@@ -195,7 +195,7 @@ class AzureRMVirtualNetwork(AzureRMModuleBase):
self.dns_servers = None
self.purge_dns_servers = None
- self.results=dict(
+ self.results = dict(
changed=False,
state=dict()
)
@@ -327,7 +327,6 @@ class AzureRMVirtualNetwork(AzureRMModuleBase):
self.delete_virtual_network()
self.results['state']['status'] = 'Deleted'
-
return self.results
def create_or_update_vnet(self, vnet):
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py
index 50dd1a95a9..1d78ad0522 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py
@@ -178,6 +178,7 @@ class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
+
def main():
AzureRMNetworkInterfaceFacts()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_host.py b/lib/ansible/modules/cloud/cloudstack/cs_host.py
index 41d38248a8..3f3fdc9170 100644
--- a/lib/ansible/modules/cloud/cloudstack/cs_host.py
+++ b/lib/ansible/modules/cloud/cloudstack/cs_host.py
@@ -464,7 +464,7 @@ class AnsibleCloudStackHost(AnsibleCloudStack):
# Set host allocationstate to be disabled/enabled
elif host['resourcestate'].lower() in list(self.allocation_states_for_update.keys()):
- host['allocationstate'] = self.allocation_states_for_update[host['resourcestate'].lower()]
+ host['allocationstate'] = self.allocation_states_for_update[host['resourcestate'].lower()]
else:
host['allocationstate'] = host['resourcestate']
diff --git a/lib/ansible/modules/cloud/docker/docker_container.py b/lib/ansible/modules/cloud/docker/docker_container.py
index 6d41a7ca14..05fda7dc08 100644
--- a/lib/ansible/modules/cloud/docker/docker_container.py
+++ b/lib/ansible/modules/cloud/docker/docker_container.py
@@ -951,7 +951,7 @@ class TaskParameters(DockerBaseClass):
Returns parameters used to create a HostConfig object
'''
- host_config_params=dict(
+ host_config_params = dict(
port_bindings='published_ports',
publish_all_ports='publish_all_ports',
links='links',
@@ -1163,7 +1163,7 @@ class TaskParameters(DockerBaseClass):
options = dict(
Type=self.log_driver,
- Config = dict()
+ Config=dict()
)
if self.log_options is not None:
@@ -1217,7 +1217,6 @@ class TaskParameters(DockerBaseClass):
return network_id
-
class Container(DockerBaseClass):
def __init__(self, container, parameters):
@@ -1570,7 +1569,7 @@ class Container(DockerBaseClass):
CgroupPermissions=parts[2],
PathInContainer=parts[1],
PathOnHost=parts[0]
- ))
+ ))
return expected_devices
def _get_expected_entrypoint(self):
diff --git a/lib/ansible/modules/cloud/docker/docker_image.py b/lib/ansible/modules/cloud/docker/docker_image.py
index a5978fc545..c2e2d78cd1 100644
--- a/lib/ansible/modules/cloud/docker/docker_image.py
+++ b/lib/ansible/modules/cloud/docker/docker_image.py
@@ -430,7 +430,7 @@ class ImageManager(DockerBaseClass):
if not self.check_mode:
status = None
try:
- for line in self.client.push(repository, tag=tag, stream=True, decode=True):
+ for line in self.client.push(repository, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('errorDetail'):
raise Exception(line['errorDetail']['message'])
diff --git a/lib/ansible/modules/cloud/docker/docker_image_facts.py b/lib/ansible/modules/cloud/docker/docker_image_facts.py
index 3f1d6afe2b..c6dff60ab9 100644
--- a/lib/ansible/modules/cloud/docker/docker_image_facts.py
+++ b/lib/ansible/modules/cloud/docker/docker_image_facts.py
@@ -217,7 +217,7 @@ class ImageManager(DockerBaseClass):
def main():
argument_spec = dict(
name=dict(type='list'),
- )
+ )
client = AnsibleDockerClient(
argument_spec=argument_spec
diff --git a/lib/ansible/modules/cloud/docker/docker_network.py b/lib/ansible/modules/cloud/docker/docker_network.py
index 1cf2c52108..e4a99b45db 100644
--- a/lib/ansible/modules/cloud/docker/docker_network.py
+++ b/lib/ansible/modules/cloud/docker/docker_network.py
@@ -184,6 +184,7 @@ class TaskParameters(DockerBaseClass):
def container_names_in_network(network):
return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
+
class DockerNetworkManager(object):
def __init__(self, client):
@@ -362,16 +363,16 @@ class DockerNetworkManager(object):
def main():
argument_spec = dict(
- network_name = dict(type='str', required=True, aliases=['name']),
- connected = dict(type='list', default=[], aliases=['containers']),
- state = dict(type='str', default='present', choices=['present', 'absent']),
- driver = dict(type='str', default='bridge'),
- driver_options = dict(type='dict', default={}),
- force = dict(type='bool', default=False),
- appends = dict(type='bool', default=False, aliases=['incremental']),
- ipam_driver = dict(type='str', default=None),
- ipam_options = dict(type='dict', default={}),
- debug = dict(type='bool', default=False)
+ network_name=dict(type='str', required=True, aliases=['name']),
+ connected=dict(type='list', default=[], aliases=['containers']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='bridge'),
+ driver_options=dict(type='dict', default={}),
+ force=dict(type='bool', default=False),
+ appends=dict(type='bool', default=False, aliases=['incremental']),
+ ipam_driver=dict(type='str', default=None),
+ ipam_options=dict(type='dict', default={}),
+ debug=dict(type='bool', default=False)
)
client = AnsibleDockerClient(
diff --git a/lib/ansible/modules/cloud/google/gc_storage.py b/lib/ansible/modules/cloud/google/gc_storage.py
index ee3f4edfc0..d190ca8b7f 100644
--- a/lib/ansible/modules/cloud/google/gc_storage.py
+++ b/lib/ansible/modules/cloud/google/gc_storage.py
@@ -179,27 +179,26 @@ def grant_check(module, gs, obj):
try:
acp = obj.get_acl()
if module.params.get('permission') == 'public-read':
- grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
if not grant:
obj.set_acl('public-read')
module.exit_json(changed=True, result="The objects permission as been set to public-read")
if module.params.get('permission') == 'authenticated-read':
- grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
if not grant:
obj.set_acl('authenticated-read')
module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
except gs.provider.storage_response_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
return True
-
def key_check(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key_check = bucket.get_key(obj)
except gs.provider.storage_response_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
if key_check:
grant_check(module, gs, key_check)
return True
@@ -213,7 +212,7 @@ def keysum(module, gs, bucket, obj):
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
- etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
+ etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
return md5_remote
@@ -223,7 +222,7 @@ def bucket_check(module, gs, bucket):
try:
result = gs.lookup(bucket)
except gs.provider.storage_response_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
if result:
grant_check(module, gs, result)
return True
@@ -237,7 +236,7 @@ def create_bucket(module, gs, bucket):
bucket.set_acl(module.params.get('permission'))
bucket.configure_versioning(module.params.get('versioning'))
except gs.provider.storage_response_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
if bucket:
return True
@@ -251,7 +250,7 @@ def delete_bucket(module, gs, bucket):
bucket.delete()
return True
except gs.provider.storage_response_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
def delete_key(module, gs, bucket, obj):
@@ -260,7 +259,7 @@ def delete_key(module, gs, bucket, obj):
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket ", changed=True)
except gs.provider.storage_response_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
def create_dirkey(module, gs, bucket, obj):
@@ -270,7 +269,7 @@ def create_dirkey(module, gs, bucket, obj):
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except gs.provider.storage_response_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
def path_check(path):
@@ -308,7 +307,7 @@ def upload_gsfile(module, gs, bucket, obj, src, expiry):
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except gs.provider.storage_copy_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
def download_gsfile(module, gs, bucket, obj, dest):
@@ -318,7 +317,7 @@ def download_gsfile(module, gs, bucket, obj, dest):
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except gs.provider.storage_copy_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
def download_gsstr(module, gs, bucket, obj):
@@ -328,7 +327,7 @@ def download_gsstr(module, gs, bucket, obj):
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except gs.provider.storage_copy_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
def get_download_url(module, gs, bucket, obj, expiry):
@@ -338,7 +337,7 @@ def get_download_url(module, gs, bucket, obj, expiry):
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
except gs.provider.storage_response_error as e:
- module.fail_json(msg= str(e))
+ module.fail_json(msg=str(e))
def handle_get(module, gs, bucket, obj, overwrite, dest):
@@ -355,7 +354,7 @@ def handle_get(module, gs, bucket, obj, overwrite, dest):
def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
# Lets check to see if bucket exists to get ground truth.
bucket_rc = bucket_check(module, gs, bucket)
- key_rc = key_check(module, gs, bucket, obj)
+ key_rc = key_check(module, gs, bucket, obj)
# Lets check key state. Does it exist and if it does, compute the etag md5sum.
if bucket_rc and key_rc:
@@ -380,7 +379,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
def handle_delete(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
- module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=delete_bucket(module, gs, bucket))
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket))
else:
module.exit_json(msg="Bucket does not exist.", changed=False)
if bucket and obj:
@@ -409,7 +408,7 @@ def handle_create(module, gs, bucket, obj):
if bucket_check(module, gs, bucket):
if key_check(module, gs, bucket, dirobj):
- module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
else:
create_dirkey(module, gs, bucket, dirobj)
else:
@@ -419,35 +418,35 @@ def handle_create(module, gs, bucket, obj):
def main():
module = AnsibleModule(
- argument_spec = dict(
- bucket = dict(required=True),
- object = dict(default=None, type='path'),
- src = dict(default=None),
- dest = dict(default=None, type='path'),
- expiration = dict(type='int', default=600, aliases=['expiry']),
- mode = dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
- permission = dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
- headers = dict(type='dict', default={}),
- gs_secret_key = dict(no_log=True, required=True),
- gs_access_key = dict(required=True),
- overwrite = dict(default=True, type='bool', aliases=['force']),
- region = dict(default='US', type='str'),
- versioning = dict(default='no', type='bool')
+ argument_spec=dict(
+ bucket=dict(required=True),
+ object=dict(default=None, type='path'),
+ src=dict(default=None),
+ dest=dict(default=None, type='path'),
+ expiration=dict(type='int', default=600, aliases=['expiry']),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
+ permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
+ headers=dict(type='dict', default={}),
+ gs_secret_key=dict(no_log=True, required=True),
+ gs_access_key=dict(required=True),
+ overwrite=dict(default=True, type='bool', aliases=['force']),
+ region=dict(default='US', type='str'),
+ versioning=dict(default='no', type='bool')
),
)
if not HAS_BOTO:
module.fail_json(msg='boto 2.9+ required for this module')
- bucket = module.params.get('bucket')
- obj = module.params.get('object')
- src = module.params.get('src')
- dest = module.params.get('dest')
- mode = module.params.get('mode')
- expiry = module.params.get('expiration')
+ bucket = module.params.get('bucket')
+ obj = module.params.get('object')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ mode = module.params.get('mode')
+ expiry = module.params.get('expiration')
gs_secret_key = module.params.get('gs_secret_key')
gs_access_key = module.params.get('gs_access_key')
- overwrite = module.params.get('overwrite')
+ overwrite = module.params.get('overwrite')
if mode == 'put':
if not src or not object:
@@ -459,7 +458,7 @@ def main():
try:
gs = boto.connect_gs(gs_access_key, gs_secret_key)
except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg = str(e))
+ module.fail_json(msg=str(e))
if mode == 'get':
if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
diff --git a/lib/ansible/modules/cloud/google/gcdns_record.py b/lib/ansible/modules/cloud/google/gcdns_record.py
index d21807fd8f..a96e016c97 100644
--- a/lib/ansible/modules/cloud/google/gcdns_record.py
+++ b/lib/ansible/modules/cloud/google/gcdns_record.py
@@ -348,7 +348,7 @@ PROVIDER = Provider.GOOGLE
# I'm hard-coding the supported record types here, because they (hopefully!)
# shouldn't change much, and it allows me to use it as a "choices" parameter
# in an AnsibleModule argument_spec.
-SUPPORTED_RECORD_TYPES = [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+SUPPORTED_RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR']
################################################################################
@@ -378,8 +378,8 @@ def create_record(module, gcdns, zone, record):
# The record doesn't match, so we need to check if we can overwrite it.
if not overwrite:
module.fail_json(
- msg = 'cannot overwrite existing record, overwrite protection enabled',
- changed = False
+ msg='cannot overwrite existing record, overwrite protection enabled',
+ changed=False
)
# The record either doesn't exist, or it exists and we can overwrite it.
@@ -393,9 +393,9 @@ def create_record(module, gcdns, zone, record):
# not when combined (e.g., an 'A' record with "www.example.com"
# as its value).
module.fail_json(
- msg = 'value is invalid for the given type: ' +
- "%s, got value: %s" % (record_type, record_data),
- changed = False
+ msg='value is invalid for the given type: ' +
+ "%s, got value: %s" % (record_type, record_data),
+ changed=False
)
elif error.code == 'cnameResourceRecordSetConflict':
@@ -403,8 +403,8 @@ def create_record(module, gcdns, zone, record):
# already have another type of resource record with the same
# domain name.
module.fail_json(
- msg = "non-CNAME resource record already exists: %s" % record_name,
- changed = False
+ msg="non-CNAME resource record already exists: %s" % record_name,
+ changed=False
)
else:
@@ -428,8 +428,8 @@ def create_record(module, gcdns, zone, record):
try:
gcdns.create_record(record.name, record.zone, record.type, record.data)
module.fail_json(
- msg = 'error updating record, the original record was restored',
- changed = False
+ msg='error updating record, the original record was restored',
+ changed=False
)
except LibcloudError:
# We deleted the old record, couldn't create the new record, and
@@ -437,12 +437,12 @@ def create_record(module, gcdns, zone, record):
# record to the failure output so the user can restore it if
# necessary.
module.fail_json(
- msg = 'error updating record, and could not restore original record, ' +
- "original name: %s " % record.name +
- "original zone: %s " % record.zone +
- "original type: %s " % record.type +
- "original data: %s" % record.data,
- changed = True)
+ msg='error updating record, and could not restore original record, ' +
+ "original name: %s " % record.name +
+ "original zone: %s " % record.zone +
+ "original type: %s " % record.type +
+ "original data: %s" % record.data,
+ changed=True)
return True
@@ -450,8 +450,8 @@ def create_record(module, gcdns, zone, record):
def remove_record(module, gcdns, record):
"""Remove a resource record."""
- overwrite = module.boolean(module.params['overwrite'])
- ttl = module.params['ttl']
+ overwrite = module.boolean(module.params['overwrite'])
+ ttl = module.params['ttl']
record_data = module.params['record_data']
# If there is no record, we're obviously done.
@@ -463,10 +463,10 @@ def remove_record(module, gcdns, record):
if not overwrite:
if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
module.fail_json(
- msg = 'cannot delete due to non-matching ttl or record_data: ' +
- "ttl: %d, record_data: %s " % (ttl, record_data) +
- "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
- changed = False
+ msg='cannot delete due to non-matching ttl or record_data: ' +
+ "ttl: %d, record_data: %s " % (ttl, record_data) +
+ "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
+ changed=False
)
# If we got to this point, we're okay to delete the record.
@@ -529,30 +529,30 @@ def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
def _sanity_check(module):
"""Run sanity checks that don't depend on info from the zone/record."""
- overwrite = module.params['overwrite']
+ overwrite = module.params['overwrite']
record_name = module.params['record']
record_type = module.params['type']
- state = module.params['state']
- ttl = module.params['ttl']
+ state = module.params['state']
+ ttl = module.params['ttl']
record_data = module.params['record_data']
# Apache libcloud needs to be installed and at least the minimum version.
if not HAS_LIBCLOUD:
module.fail_json(
- msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
- changed = False
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
)
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
module.fail_json(
- msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
- changed = False
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
)
# A negative TTL is not permitted (how would they even work?!).
if ttl < 0:
module.fail_json(
- msg = 'TTL cannot be less than zero, got: %d' % ttl,
- changed = False
+ msg='TTL cannot be less than zero, got: %d' % ttl,
+ changed=False
)
# Deleting SOA records is not permitted.
@@ -572,8 +572,8 @@ def _sanity_check(module):
socket.inet_aton(value)
except socket.error:
module.fail_json(
- msg = 'invalid A record value, got: %s' % value,
- changed = False
+ msg='invalid A record value, got: %s' % value,
+ changed=False
)
# AAAA records must contain valid IPv6 addresses.
@@ -583,23 +583,23 @@ def _sanity_check(module):
socket.inet_pton(socket.AF_INET6, value)
except socket.error:
module.fail_json(
- msg = 'invalid AAAA record value, got: %s' % value,
- changed = False
+ msg='invalid AAAA record value, got: %s' % value,
+ changed=False
)
# CNAME and SOA records can't have multiple values.
if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
module.fail_json(
- msg = 'CNAME or SOA records cannot have more than one value, ' +
- "got: %s" % record_data,
- changed = False
+ msg='CNAME or SOA records cannot have more than one value, ' +
+ "got: %s" % record_data,
+ changed=False
)
# Google Cloud DNS does not support wildcard NS records.
if record_type == 'NS' and record_name[0] == '*':
module.fail_json(
- msg = "wildcard NS records not allowed, got: %s" % record_name,
- changed = False
+ msg="wildcard NS records not allowed, got: %s" % record_name,
+ changed=False
)
# Values for txt records must begin and end with a double quote.
@@ -607,32 +607,32 @@ def _sanity_check(module):
for value in record_data:
if value[0] != '"' and value[-1] != '"':
module.fail_json(
- msg = 'TXT record_data must be enclosed in double quotes, ' +
- 'got: %s' % value,
- changed = False
+ msg='TXT record_data must be enclosed in double quotes, ' +
+ 'got: %s' % value,
+ changed=False
)
def _additional_sanity_checks(module, zone):
"""Run input sanity checks that depend on info from the zone/record."""
- overwrite = module.params['overwrite']
+ overwrite = module.params['overwrite']
record_name = module.params['record']
record_type = module.params['type']
- state = module.params['state']
+ state = module.params['state']
# CNAME records are not allowed to have the same name as the root domain.
if record_type == 'CNAME' and record_name == zone.domain:
module.fail_json(
- msg = 'CNAME records cannot match the zone name',
- changed = False
+ msg='CNAME records cannot match the zone name',
+ changed=False
)
# The root domain must always have an NS record.
if record_type == 'NS' and record_name == zone.domain and state == 'absent':
module.fail_json(
- msg = 'cannot delete root NS records',
- changed = False
+ msg='cannot delete root NS records',
+ changed=False
)
# Updating NS records with the name as the root domain is not allowed
@@ -640,16 +640,16 @@ def _additional_sanity_checks(module, zone):
# records cannot be removed.
if record_type == 'NS' and record_name == zone.domain and overwrite:
module.fail_json(
- msg = 'cannot update existing root NS records',
- changed = False
+ msg='cannot update existing root NS records',
+ changed=False
)
# SOA records with names that don't match the root domain are not permitted
# (and wouldn't make sense anyway).
if record_type == 'SOA' and record_name != zone.domain:
module.fail_json(
- msg = 'non-root SOA records are not permitted, got: %s' % record_name,
- changed = False
+ msg='non-root SOA records are not permitted, got: %s' % record_name,
+ changed=False
)
@@ -661,46 +661,46 @@ def main():
"""Main function"""
module = AnsibleModule(
- argument_spec = dict(
- state = dict(default='present', choices=['present', 'absent'], type='str'),
- record = dict(required=True, aliases=['name'], type='str'),
- zone = dict(type='str'),
- zone_id = dict(type='str'),
- type = dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
- record_data = dict(aliases=['value'], type='list'),
- ttl = dict(default=300, type='int'),
- overwrite = dict(default=False, type='bool'),
- service_account_email = dict(type='str'),
- pem_file = dict(type='path'),
- credentials_file = dict(type='path'),
- project_id = dict(type='str')
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ record=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(type='str'),
+ zone_id=dict(type='str'),
+ type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
+ record_data=dict(aliases=['value'], type='list'),
+ ttl=dict(default=300, type='int'),
+ overwrite=dict(default=False, type='bool'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
),
- required_if = [
+ required_if=[
('state', 'present', ['record_data']),
('overwrite', False, ['record_data'])
],
- required_one_of = [['zone', 'zone_id']],
- supports_check_mode = True
+ required_one_of=[['zone', 'zone_id']],
+ supports_check_mode=True
)
_sanity_check(module)
record_name = module.params['record']
record_type = module.params['type']
- state = module.params['state']
- ttl = module.params['ttl']
- zone_name = module.params['zone']
- zone_id = module.params['zone_id']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ zone_name = module.params['zone']
+ zone_id = module.params['zone_id']
json_output = dict(
- state = state,
- record = record_name,
- zone = zone_name,
- zone_id = zone_id,
- type = record_type,
- record_data = module.params['record_data'],
- ttl = ttl,
- overwrite = module.boolean(module.params['overwrite'])
+ state=state,
+ record=record_name,
+ zone=zone_name,
+ zone_id=zone_id,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl,
+ overwrite=module.boolean(module.params['overwrite'])
)
# Google Cloud DNS wants the trailing dot on all DNS names.
@@ -718,13 +718,13 @@ def main():
zone = _get_zone(gcdns, zone_name, zone_id)
if zone is None and zone_name is not None:
module.fail_json(
- msg = 'zone name was not found: %s' % zone_name,
- changed = False
+ msg='zone name was not found: %s' % zone_name,
+ changed=False
)
elif zone is None and zone_id is not None:
module.fail_json(
- msg = 'zone id was not found: %s' % zone_id,
- changed = False
+ msg='zone id was not found: %s' % zone_id,
+ changed=False
)
# Populate the returns with the actual zone information.
@@ -738,8 +738,8 @@ def main():
except InvalidRequestError:
# We gave Google Cloud DNS an invalid DNS record name.
module.fail_json(
- msg = 'record name is invalid: %s' % record_name,
- changed = False
+ msg='record name is invalid: %s' % record_name,
+ changed=False
)
_additional_sanity_checks(module, zone)
@@ -752,20 +752,20 @@ def main():
diff['before_header'] = '<absent>'
else:
diff['before'] = dict(
- record = record.data['name'],
- type = record.data['type'],
- record_data = record.data['rrdatas'],
- ttl = record.data['ttl']
+ record=record.data['name'],
+ type=record.data['type'],
+ record_data=record.data['rrdatas'],
+ ttl=record.data['ttl']
)
diff['before_header'] = "%s:%s" % (record_type, record_name)
# Create, remove, or modify the record.
if state == 'present':
diff['after'] = dict(
- record = record_name,
- type = record_type,
- record_data = module.params['record_data'],
- ttl = ttl
+ record=record_name,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl
)
diff['after_header'] = "%s:%s" % (record_type, record_name)
diff --git a/lib/ansible/modules/cloud/google/gcdns_zone.py b/lib/ansible/modules/cloud/google/gcdns_zone.py
index a2511f82cd..cd7f89ea7d 100644
--- a/lib/ansible/modules/cloud/google/gcdns_zone.py
+++ b/lib/ansible/modules/cloud/google/gcdns_zone.py
@@ -145,18 +145,19 @@ MINIMUM_LIBCLOUD_VERSION = '0.19.0'
PROVIDER = Provider.GOOGLE
# The URL used to verify ownership of a zone in Google Cloud DNS.
-ZONE_VERIFICATION_URL= 'https://www.google.com/webmasters/verification/'
+ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
################################################################################
# Functions
################################################################################
+
def create_zone(module, gcdns, zone):
"""Creates a new Google Cloud DNS zone."""
description = module.params['description']
- extra = dict(description = description)
- zone_name = module.params['zone']
+ extra = dict(description=description)
+ zone_name = module.params['zone']
# Google Cloud DNS wants the trailing dot on the domain name.
if zone_name[-1] != '.':
@@ -184,8 +185,8 @@ def create_zone(module, gcdns, zone):
# The zone name or a parameter might be completely invalid. This is
# typically caused by an illegal DNS name (e.g. foo..com).
module.fail_json(
- msg = "zone name is not a valid DNS name: %s" % zone_name,
- changed = False
+ msg="zone name is not a valid DNS name: %s" % zone_name,
+ changed=False
)
elif error.code == 'managedZoneDnsNameNotAvailable':
@@ -193,8 +194,8 @@ def create_zone(module, gcdns, zone):
# names, such as TLDs, ccTLDs, or special domain names such as
# example.com.
module.fail_json(
- msg = "zone name is reserved or already in use: %s" % zone_name,
- changed = False
+ msg="zone name is reserved or already in use: %s" % zone_name,
+ changed=False
)
elif error.code == 'verifyManagedZoneDnsNameOwnership':
@@ -202,8 +203,8 @@ def create_zone(module, gcdns, zone):
# it. This occurs when a user attempts to create a zone which shares
# a domain name with a zone hosted elsewhere in Google Cloud DNS.
module.fail_json(
- msg = "ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
- changed = False
+ msg="ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
+ changed=False
)
else:
@@ -226,8 +227,8 @@ def remove_zone(module, gcdns, zone):
# refuse to remove the zone.
if len(zone.list_records()) > 2:
module.fail_json(
- msg = "zone is not empty and cannot be removed: %s" % zone.domain,
- changed = False
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
)
try:
@@ -246,8 +247,8 @@ def remove_zone(module, gcdns, zone):
# the milliseconds between the check and the removal command,
# records were added to the zone.
module.fail_json(
- msg = "zone is not empty and cannot be removed: %s" % zone.domain,
- changed = False
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
)
else:
@@ -273,6 +274,7 @@ def _get_zone(gcdns, zone_name):
return found_zone
+
def _sanity_check(module):
"""Run module sanity checks."""
@@ -281,40 +283,41 @@ def _sanity_check(module):
# Apache libcloud needs to be installed and at least the minimum version.
if not HAS_LIBCLOUD:
module.fail_json(
- msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
- changed = False
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
)
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
module.fail_json(
- msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
- changed = False
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
)
# Google Cloud DNS does not support the creation of TLDs.
if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
module.fail_json(
- msg = 'cannot create top-level domain: %s' % zone_name,
- changed = False
+ msg='cannot create top-level domain: %s' % zone_name,
+ changed=False
)
################################################################################
# Main
################################################################################
+
def main():
"""Main function"""
module = AnsibleModule(
- argument_spec = dict(
- state = dict(default='present', choices=['present', 'absent'], type='str'),
- zone = dict(required=True, aliases=['name'], type='str'),
- description = dict(default='', type='str'),
- service_account_email = dict(type='str'),
- pem_file = dict(type='path'),
- credentials_file = dict(type='path'),
- project_id = dict(type='str')
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ zone=dict(required=True, aliases=['name'], type='str'),
+ description=dict(default='', type='str'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
),
- supports_check_mode = True
+ supports_check_mode=True
)
_sanity_check(module)
@@ -327,9 +330,9 @@ def main():
zone_name = zone_name + '.'
json_output = dict(
- state = state,
- zone = zone_name,
- description = module.params['description']
+ state=state,
+ zone=zone_name,
+ description=module.params['description']
)
# Build a connection object that we can use to connect with Google
@@ -347,16 +350,16 @@ def main():
diff['before_header'] = '<absent>'
else:
diff['before'] = dict(
- zone = zone.domain,
- description = zone.extra['description']
+ zone=zone.domain,
+ description=zone.extra['description']
)
diff['before_header'] = zone_name
# Create or remove the zone.
if state == 'present':
diff['after'] = dict(
- zone = zone_name,
- description = module.params['description']
+ zone=zone_name,
+ description=module.params['description']
)
diff['after_header'] = zone_name
diff --git a/lib/ansible/modules/cloud/lxc/lxc_container.py b/lib/ansible/modules/cloud/lxc/lxc_container.py
index 15bcbc7617..c17cb8b076 100644
--- a/lib/ansible/modules/cloud/lxc/lxc_container.py
+++ b/lib/ansible/modules/cloud/lxc/lxc_container.py
@@ -377,7 +377,7 @@ EXAMPLES = """
- test-container-new-archive-destroyed-clone
"""
-RETURN="""
+RETURN = """
lxc_container:
description: container information
returned: success
@@ -579,7 +579,7 @@ def create_script(command):
f.close()
# Ensure the script is executable.
- os.chmod(script_file, int('0700',8))
+ os.chmod(script_file, int('0700', 8))
# Output log file.
stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
@@ -915,7 +915,7 @@ class LxcContainerManagement(object):
'ips': self.container.get_ips(),
'state': self._get_state(),
'init_pid': int(self.container.init_pid),
- 'name' : self.container_name,
+ 'name': self.container_name,
}
def _unfreeze(self):
@@ -1365,7 +1365,7 @@ class LxcContainerManagement(object):
:type source_dir: ``str``
"""
- old_umask = os.umask(int('0077',8))
+ old_umask = os.umask(int('0077', 8))
archive_path = self.module.params.get('archive_path')
if not os.path.isdir(archive_path):
@@ -1750,7 +1750,7 @@ def main():
)
),
supports_check_mode=False,
- required_if = ([
+ required_if=([
('archive', True, ['archive_path'])
]),
)
diff --git a/lib/ansible/modules/cloud/lxd/lxd_container.py b/lib/ansible/modules/cloud/lxd/lxd_container.py
index bf185c9d3b..12d1bc8965 100644
--- a/lib/ansible/modules/cloud/lxd/lxd_container.py
+++ b/lib/ansible/modules/cloud/lxd/lxd_container.py
@@ -216,7 +216,7 @@ EXAMPLES = '''
flat: true
'''
-RETURN='''
+RETURN = '''
addresses:
description: Mapping from the network device name to a list of IPv4 addresses in the container
returned: when state is started or restarted
@@ -328,7 +328,7 @@ class LXDContainerManagement(object):
return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
def _change_state(self, action, force_stop=False):
- body_json={'action': action, 'timeout': self.timeout}
+ body_json = {'action': action, 'timeout': self.timeout}
if force_stop:
body_json['force'] = True
return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
@@ -527,6 +527,7 @@ class LXDContainerManagement(object):
fail_params['logs'] = e.kwargs['logs']
self.module.fail_json(**fail_params)
+
def main():
"""Ansible Main module."""
@@ -585,7 +586,7 @@ def main():
type='str',
default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
),
- trust_password=dict( type='str', no_log=True )
+ trust_password=dict(type='str', no_log=True)
),
supports_check_mode=False,
)
diff --git a/lib/ansible/modules/cloud/misc/rhevm.py b/lib/ansible/modules/cloud/misc/rhevm.py
index 098ae76e0e..39e846397a 100644
--- a/lib/ansible/modules/cloud/misc/rhevm.py
+++ b/lib/ansible/modules/cloud/misc/rhevm.py
@@ -347,6 +347,7 @@ failed = False
class RHEVConn(object):
'Connection to RHEV-M'
+
def __init__(self, module):
self.module = module
@@ -726,11 +727,11 @@ class RHEVConn(object):
bond.append(ifacelist[slave])
try:
tmpiface = params.Bonding(
- slaves = params.Slaves(host_nic = bond),
- options = params.Options(
- option = [
- params.Option(name = 'miimon', value = '100'),
- params.Option(name = 'mode', value = '4')
+ slaves=params.Slaves(host_nic=bond),
+ options=params.Options(
+ option=[
+ params.Option(name='miimon', value='100'),
+ params.Option(name='mode', value='4')
]
)
)
@@ -741,16 +742,16 @@ class RHEVConn(object):
return False
try:
tmpnetwork = params.HostNIC(
- network = params.Network(name = iface['network']),
- name = iface['name'],
- boot_protocol = iface['boot_protocol'],
- ip = params.IP(
- address = iface['ip'],
- netmask = iface['netmask'],
- gateway = iface['gateway']
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
),
- override_configuration = True,
- bonding = tmpiface)
+ override_configuration=True,
+ bonding=tmpiface)
networklist.append(tmpnetwork)
setMsg('Applying network ' + iface['name'])
except Exception as e:
@@ -760,13 +761,13 @@ class RHEVConn(object):
return False
else:
tmpnetwork = params.HostNIC(
- network = params.Network(name = iface['network']),
- name = iface['name'],
- boot_protocol = iface['boot_protocol'],
- ip = params.IP(
- address = iface['ip'],
- netmask = iface['netmask'],
- gateway = iface['gateway']
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
))
networklist.append(tmpnetwork)
setMsg('Applying network ' + iface['name'])
@@ -828,8 +829,8 @@ class RHEVConn(object):
try:
HOST.nics.setupnetworks(params.Action(
force=True,
- check_connectivity = False,
- host_nics = params.HostNics(host_nic = networklist)
+ check_connectivity=False,
+ host_nics=params.HostNics(host_nic=networklist)
))
setMsg('nics are set')
except Exception as e:
@@ -1008,24 +1009,24 @@ class RHEV(object):
VM = self.conn.get_VM(name)
if VM:
vminfo = dict()
- vminfo['uuid'] = VM.id
- vminfo['name'] = VM.name
- vminfo['status'] = VM.status.state
- vminfo['cpu_cores'] = VM.cpu.topology.cores
+ vminfo['uuid'] = VM.id
+ vminfo['name'] = VM.name
+ vminfo['status'] = VM.status.state
+ vminfo['cpu_cores'] = VM.cpu.topology.cores
vminfo['cpu_sockets'] = VM.cpu.topology.sockets
- vminfo['cpu_shares'] = VM.cpu_shares
- vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
- vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
- vminfo['os'] = VM.get_os().type_
- vminfo['del_prot'] = VM.delete_protected
+ vminfo['cpu_shares'] = VM.cpu_shares
+ vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
+ vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
+ vminfo['os'] = VM.get_os().type_
+ vminfo['del_prot'] = VM.delete_protected
try:
- vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
+ vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
except Exception:
- vminfo['host'] = None
- vminfo['boot_order'] = []
+ vminfo['host'] = None
+ vminfo['boot_order'] = []
for boot_dev in VM.os.get_boot():
vminfo['boot_order'].append(str(boot_dev.dev))
- vminfo['disks'] = []
+ vminfo['disks'] = []
for DISK in VM.disks.list():
disk = dict()
disk['name'] = DISK.name
@@ -1033,7 +1034,7 @@ class RHEV(object):
disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
disk['interface'] = DISK.interface
vminfo['disks'].append(disk)
- vminfo['ifaces'] = []
+ vminfo['ifaces'] = []
for NIC in VM.nics.list():
iface = dict()
iface['name'] = str(NIC.name)
@@ -1083,17 +1084,17 @@ class RHEV(object):
bootselect = True
for disk in disks:
- diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
- disksize = disk.get('size', 1)
- diskdomain = disk.get('domain', None)
+ diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
+ disksize = disk.get('size', 1)
+ diskdomain = disk.get('domain', None)
if diskdomain is None:
setMsg("`domain` is a required disk key.")
setFailed()
return False
- diskinterface = disk.get('interface', 'virtio')
- diskformat = disk.get('format', 'raw')
+ diskinterface = disk.get('interface', 'virtio')
+ diskformat = disk.get('format', 'raw')
diskallocationtype = disk.get('thin', False)
- diskboot = disk.get('bootable', False)
+ diskboot = disk.get('bootable', False)
if bootselect is False and counter == 0:
diskboot = True
@@ -1175,7 +1176,7 @@ class RHEV(object):
def setBootOrder(self, vmname, boot_order):
self.__get_conn()
VM = self.conn.get_VM(vmname)
- bootorder = []
+ bootorder = []
for boot_dev in VM.os.get_boot():
bootorder.append(str(boot_dev.dev))
@@ -1469,31 +1470,31 @@ def core(module):
def main():
global module
module = AnsibleModule(
- argument_spec = dict(
- state = dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
- user = dict(default="admin@internal"),
- password = dict(required=True, no_log=True),
- server = dict(default="127.0.0.1"),
- port = dict(default="443"),
- insecure_api = dict(default=False, type='bool'),
- name = dict(),
- image = dict(default=False),
- datacenter = dict(default="Default"),
- type = dict(default="server", choices=['server', 'desktop', 'host']),
- cluster = dict(default=''),
- vmhost = dict(default=False),
- vmcpu = dict(default="2"),
- vmmem = dict(default="1"),
- disks = dict(),
- osver = dict(default="rhel_6x64"),
- ifaces = dict(aliases=['nics', 'interfaces']),
- timeout = dict(default=False),
- mempol = dict(default="1"),
- vm_ha = dict(default=True),
- cpu_share = dict(default="0"),
- boot_order = dict(default=["network", "hd"]),
- del_prot = dict(default=True, type="bool"),
- cd_drive = dict(default=False)
+ argument_spec=dict(
+ state=dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
+ user=dict(default="admin@internal"),
+ password=dict(required=True, no_log=True),
+ server=dict(default="127.0.0.1"),
+ port=dict(default="443"),
+ insecure_api=dict(default=False, type='bool'),
+ name=dict(),
+ image=dict(default=False),
+ datacenter=dict(default="Default"),
+ type=dict(default="server", choices=['server', 'desktop', 'host']),
+ cluster=dict(default=''),
+ vmhost=dict(default=False),
+ vmcpu=dict(default="2"),
+ vmmem=dict(default="1"),
+ disks=dict(),
+ osver=dict(default="rhel_6x64"),
+ ifaces=dict(aliases=['nics', 'interfaces']),
+ timeout=dict(default=False),
+ mempol=dict(default="1"),
+ vm_ha=dict(default=True),
+ cpu_share=dict(default="0"),
+ boot_order=dict(default=["network", "hd"]),
+ del_prot=dict(default=True, type="bool"),
+ cd_drive=dict(default=False)
),
)
diff --git a/lib/ansible/modules/cloud/misc/serverless.py b/lib/ansible/modules/cloud/misc/serverless.py
index a46ab7ac09..044e788242 100644
--- a/lib/ansible/modules/cloud/misc/serverless.py
+++ b/lib/ansible/modules/cloud/misc/serverless.py
@@ -154,13 +154,13 @@ def get_service_name(module, stage):
def main():
module = AnsibleModule(
argument_spec=dict(
- service_path = dict(required=True, type='path'),
- state = dict(default='present', choices=['present', 'absent'], required=False),
- functions = dict(type='list', required=False),
- region = dict(default='', required=False),
- stage = dict(default='', required=False),
- deploy = dict(default=True, type='bool', required=False),
- serverless_bin_path = dict(required=False, type='path')
+ service_path=dict(required=True, type='path'),
+ state=dict(default='present', choices=['present', 'absent'], required=False),
+ functions=dict(type='list', required=False),
+ region=dict(default='', required=False),
+ stage=dict(default='', required=False),
+ deploy=dict(default=True, type='bool', required=False),
+ serverless_bin_path=dict(required=False, type='path')
),
)
@@ -198,13 +198,13 @@ def main():
if rc != 0:
if state == 'absent' and "-{}' does not exist".format(stage) in out:
module.exit_json(changed=False, state='absent', command=command,
- out=out, service_name=get_service_name(module, stage))
+ out=out, service_name=get_service_name(module, stage))
module.fail_json(msg="Failure when executing Serverless command. Exited {}.\nstdout: {}\nstderr: {}".format(rc, out, err))
# gather some facts about the deployment
module.exit_json(changed=True, state='present', out=out, command=command,
- service_name=get_service_name(module, stage))
+ service_name=get_service_name(module, stage))
if __name__ == '__main__':
diff --git a/lib/ansible/modules/cloud/misc/virt_net.py b/lib/ansible/modules/cloud/misc/virt_net.py
index 6759fc347f..96d61e6101 100644
--- a/lib/ansible/modules/cloud/misc/virt_net.py
+++ b/lib/ansible/modules/cloud/misc/virt_net.py
@@ -151,31 +151,32 @@ from ansible.module_utils._text import to_native
VIRT_FAILED = 1
VIRT_SUCCESS = 0
-VIRT_UNAVAILABLE=2
+VIRT_UNAVAILABLE = 2
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
'undefine', 'destroy', 'get_xml', 'define',
- 'modify' ]
-HOST_COMMANDS = [ 'list_nets', 'facts', 'info' ]
+ 'modify']
+HOST_COMMANDS = ['list_nets', 'facts', 'info']
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
ENTRY_STATE_ACTIVE_MAP = {
- 0 : "inactive",
- 1 : "active"
+ 0: "inactive",
+ 1: "active"
}
ENTRY_STATE_AUTOSTART_MAP = {
- 0 : "no",
- 1 : "yes"
+ 0: "no",
+ 1: "yes"
}
ENTRY_STATE_PERSISTENT_MAP = {
- 0 : "no",
- 1 : "yes"
+ 0: "no",
+ 1: "yes"
}
+
class EntryNotFound(Exception):
pass
@@ -245,9 +246,9 @@ class LibvirtConnection(object):
if host is None:
# add the host
if not self.module.check_mode:
- res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
- libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
- -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
+ res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
+ libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
+ -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
else:
# pretend there was a change
res = 0
@@ -259,9 +260,9 @@ class LibvirtConnection(object):
return False
else:
if not self.module.check_mode:
- res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
- libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
- -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
+ res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
+ libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
+ -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
else:
# pretend there was a change
res = 0
@@ -286,18 +287,18 @@ class LibvirtConnection(object):
def get_status2(self, entry):
state = entry.isActive()
- return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
def get_status(self, entryid):
if not self.module.check_mode:
state = self.find_entry(entryid).isActive()
- return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
else:
try:
state = self.find_entry(entryid).isActive()
- return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
except:
- return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
+ return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")
def get_uuid(self, entryid):
return self.find_entry(entryid).UUIDString()
@@ -331,7 +332,7 @@ class LibvirtConnection(object):
def get_autostart(self, entryid):
state = self.find_entry(entryid).autostart()
- return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
+ return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")
def get_autostart2(self, entryid):
if not self.module.check_mode:
@@ -358,7 +359,7 @@ class LibvirtConnection(object):
def get_persistent(self, entryid):
state = self.find_entry(entryid).isPersistent()
- return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
+ return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")
def get_dhcp_leases(self, entryid):
network = self.find_entry(entryid)
@@ -398,7 +399,7 @@ class VirtNetwork(object):
results = []
for entry in self.list_nets():
state_blurb = self.conn.get_status(entry)
- results.append("%s %s" % (entry,state_blurb))
+ results.append("%s %s" % (entry, state_blurb))
return results
def autostart(self, entryid):
@@ -481,11 +482,11 @@ class VirtNetwork(object):
def core(module):
- state = module.params.get('state', None)
- name = module.params.get('name', None)
- command = module.params.get('command', None)
- uri = module.params.get('uri', None)
- xml = module.params.get('xml', None)
+ state = module.params.get('state', None)
+ name = module.params.get('name', None)
+ command = module.params.get('command', None)
+ uri = module.params.get('uri', None)
+ xml = module.params.get('xml', None)
autostart = module.params.get('autostart', None)
v = VirtNetwork(uri, module)
@@ -494,33 +495,33 @@ def core(module):
if state and command == 'list_nets':
res = v.list_nets(state=state)
if not isinstance(res, dict):
- res = { command: res }
+ res = {command: res}
return VIRT_SUCCESS, res
if state:
if not name:
- module.fail_json(msg = "state change requires a specified name")
+ module.fail_json(msg="state change requires a specified name")
res['changed'] = False
- if state in [ 'active' ]:
+ if state in ['active']:
if v.status(name) is not 'active':
res['changed'] = True
res['msg'] = v.start(name)
- elif state in [ 'present' ]:
+ elif state in ['present']:
try:
v.get_net(name)
except EntryNotFound:
if not xml:
- module.fail_json(msg = "network '" + name + "' not present, but xml not specified")
+ module.fail_json(msg="network '" + name + "' not present, but xml not specified")
v.define(name, xml)
res = {'changed': True, 'created': name}
- elif state in [ 'inactive' ]:
+ elif state in ['inactive']:
entries = v.list_nets()
if name in entries:
if v.status(name) is not 'inactive':
res['changed'] = True
res['msg'] = v.destroy(name)
- elif state in [ 'undefined', 'absent' ]:
+ elif state in ['undefined', 'absent']:
entries = v.list_nets()
if name in entries:
if v.status(name) is not 'inactive':
@@ -535,10 +536,10 @@ def core(module):
if command:
if command in ENTRY_COMMANDS:
if not name:
- module.fail_json(msg = "%s requires 1 argument: name" % command)
+ module.fail_json(msg="%s requires 1 argument: name" % command)
if command in ('define', 'modify'):
if not xml:
- module.fail_json(msg = command+" requires xml argument")
+ module.fail_json(msg=command + " requires xml argument")
try:
v.get_net(name)
except EntryNotFound:
@@ -551,13 +552,13 @@ def core(module):
return VIRT_SUCCESS, res
res = getattr(v, command)(name)
if not isinstance(res, dict):
- res = { command: res }
+ res = {command: res}
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if not isinstance(res, dict):
- res = { command: res }
+ res = {command: res}
return VIRT_SUCCESS, res
else:
@@ -565,7 +566,7 @@ def core(module):
if autostart is not None:
if not name:
- module.fail_json(msg = "state change requires a specified name")
+ module.fail_json(msg="state change requires a specified name")
res['changed'] = False
if autostart:
@@ -584,16 +585,16 @@ def core(module):
def main():
- module = AnsibleModule (
- argument_spec = dict(
- name = dict(aliases=['network']),
- state = dict(choices=['active', 'inactive', 'present', 'absent']),
- command = dict(choices=ALL_COMMANDS),
- uri = dict(default='qemu:///system'),
- xml = dict(),
- autostart = dict(type='bool')
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['network']),
+ state=dict(choices=['active', 'inactive', 'present', 'absent']),
+ command=dict(choices=ALL_COMMANDS),
+ uri=dict(default='qemu:///system'),
+ xml=dict(),
+ autostart=dict(type='bool')
),
- supports_check_mode = True
+ supports_check_mode=True
)
if not HAS_VIRT:
@@ -612,7 +613,7 @@ def main():
except Exception as e:
module.fail_json(msg=str(e))
- if rc != 0: # something went wrong emit the msg
+ if rc != 0:  # something went wrong emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)
diff --git a/lib/ansible/modules/cloud/misc/virt_pool.py b/lib/ansible/modules/cloud/misc/virt_pool.py
index 23a96a31e7..a3994a4ab2 100644
--- a/lib/ansible/modules/cloud/misc/virt_pool.py
+++ b/lib/ansible/modules/cloud/misc/virt_pool.py
@@ -165,49 +165,49 @@ from ansible.module_utils.basic import AnsibleModule
VIRT_FAILED = 1
VIRT_SUCCESS = 0
-VIRT_UNAVAILABLE=2
+VIRT_UNAVAILABLE = 2
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',
'undefine', 'destroy', 'get_xml', 'define', 'refresh']
-HOST_COMMANDS = [ 'list_pools', 'facts', 'info' ]
+HOST_COMMANDS = ['list_pools', 'facts', 'info']
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
ENTRY_STATE_ACTIVE_MAP = {
- 0 : "inactive",
- 1 : "active"
+ 0: "inactive",
+ 1: "active"
}
ENTRY_STATE_AUTOSTART_MAP = {
- 0 : "no",
- 1 : "yes"
+ 0: "no",
+ 1: "yes"
}
ENTRY_STATE_PERSISTENT_MAP = {
- 0 : "no",
- 1 : "yes"
+ 0: "no",
+ 1: "yes"
}
ENTRY_STATE_INFO_MAP = {
- 0 : "inactive",
- 1 : "building",
- 2 : "running",
- 3 : "degraded",
- 4 : "inaccessible"
+ 0: "inactive",
+ 1: "building",
+ 2: "running",
+ 3: "degraded",
+ 4: "inaccessible"
}
ENTRY_BUILD_FLAGS_MAP = {
- "new" : 0,
- "repair" : 1,
- "resize" : 2,
- "no_overwrite" : 4,
- "overwrite" : 8
+ "new": 0,
+ "repair": 1,
+ "resize": 2,
+ "no_overwrite": 4,
+ "overwrite": 8
}
ENTRY_DELETE_FLAGS_MAP = {
- "normal" : 0,
- "zeroed" : 1
+ "normal": 0,
+ "zeroed": 1
}
ALL_MODES = []
@@ -283,18 +283,18 @@ class LibvirtConnection(object):
def get_status2(self, entry):
state = entry.isActive()
- return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
def get_status(self, entryid):
if not self.module.check_mode:
state = self.find_entry(entryid).isActive()
- return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
else:
try:
state = self.find_entry(entryid).isActive()
- return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
except:
- return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
+ return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")
def get_uuid(self, entryid):
return self.find_entry(entryid).UUIDString()
@@ -378,7 +378,7 @@ class LibvirtConnection(object):
def get_autostart(self, entryid):
state = self.find_entry(entryid).autostart()
- return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
+ return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")
def get_autostart2(self, entryid):
if not self.module.check_mode:
@@ -405,7 +405,7 @@ class LibvirtConnection(object):
def get_persistent(self, entryid):
state = self.find_entry(entryid).isPersistent()
- return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
+ return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")
def define_from_xml(self, entryid, xml):
if not self.module.check_mode:
@@ -441,7 +441,7 @@ class VirtStoragePool(object):
results = []
for entry in self.list_pools():
state_blurb = self.conn.get_status(entry)
- results.append("%s %s" % (entry,state_blurb))
+ results.append("%s %s" % (entry, state_blurb))
return results
def autostart(self, entryid):
@@ -478,10 +478,10 @@ class VirtStoragePool(object):
return self.conn.define_from_xml(entryid, xml)
def build(self, entryid, flags):
- return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags,0))
+ return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags, 0))
def delete(self, entryid, flags):
- return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags,0))
+ return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags, 0))
def refresh(self, entryid):
return self.conn.refresh(entryid)
@@ -501,10 +501,10 @@ class VirtStoragePool(object):
# assume the other end of the xmlrpc connection can figure things
# out or doesn't care.
results[entry] = {
- "status" : ENTRY_STATE_INFO_MAP.get(data[0],"unknown"),
- "size_total" : str(data[1]),
- "size_used" : str(data[2]),
- "size_available" : str(data[3]),
+ "status": ENTRY_STATE_INFO_MAP.get(data[0], "unknown"),
+ "size_total": str(data[1]),
+ "size_used": str(data[2]),
+ "size_available": str(data[3]),
}
results[entry]["autostart"] = self.conn.get_autostart(entry)
results[entry]["persistent"] = self.conn.get_persistent(entry)
@@ -555,13 +555,13 @@ class VirtStoragePool(object):
def core(module):
- state = module.params.get('state', None)
- name = module.params.get('name', None)
- command = module.params.get('command', None)
- uri = module.params.get('uri', None)
- xml = module.params.get('xml', None)
+ state = module.params.get('state', None)
+ name = module.params.get('name', None)
+ command = module.params.get('command', None)
+ uri = module.params.get('uri', None)
+ xml = module.params.get('xml', None)
autostart = module.params.get('autostart', None)
- mode = module.params.get('mode', None)
+ mode = module.params.get('mode', None)
v = VirtStoragePool(uri, module)
res = {}
@@ -569,40 +569,40 @@ def core(module):
if state and command == 'list_pools':
res = v.list_pools(state=state)
if not isinstance(res, dict):
- res = { command: res }
+ res = {command: res}
return VIRT_SUCCESS, res
if state:
if not name:
- module.fail_json(msg = "state change requires a specified name")
+ module.fail_json(msg="state change requires a specified name")
res['changed'] = False
- if state in [ 'active' ]:
+ if state in ['active']:
if v.status(name) is not 'active':
res['changed'] = True
res['msg'] = v.start(name)
- elif state in [ 'present' ]:
+ elif state in ['present']:
try:
v.get_pool(name)
except EntryNotFound:
if not xml:
- module.fail_json(msg = "storage pool '" + name + "' not present, but xml not specified")
+ module.fail_json(msg="storage pool '" + name + "' not present, but xml not specified")
v.define(name, xml)
res = {'changed': True, 'created': name}
- elif state in [ 'inactive' ]:
+ elif state in ['inactive']:
entries = v.list_pools()
if name in entries:
if v.status(name) is not 'inactive':
res['changed'] = True
res['msg'] = v.destroy(name)
- elif state in [ 'undefined', 'absent' ]:
+ elif state in ['undefined', 'absent']:
entries = v.list_pools()
if name in entries:
if v.status(name) is not 'inactive':
v.destroy(name)
res['changed'] = True
res['msg'] = v.undefine(name)
- elif state in [ 'deleted' ]:
+ elif state in ['deleted']:
entries = v.list_pools()
if name in entries:
if v.status(name) is not 'inactive':
@@ -618,10 +618,10 @@ def core(module):
if command:
if command in ENTRY_COMMANDS:
if not name:
- module.fail_json(msg = "%s requires 1 argument: name" % command)
+ module.fail_json(msg="%s requires 1 argument: name" % command)
if command == 'define':
if not xml:
- module.fail_json(msg = "define requires xml argument")
+ module.fail_json(msg="define requires xml argument")
try:
v.get_pool(name)
except EntryNotFound:
@@ -631,22 +631,22 @@ def core(module):
elif command == 'build':
res = v.build(name, mode)
if not isinstance(res, dict):
- res = { 'changed': True, command: res }
+ res = {'changed': True, command: res}
return VIRT_SUCCESS, res
elif command == 'delete':
res = v.delete(name, mode)
if not isinstance(res, dict):
- res = { 'changed': True, command: res }
+ res = {'changed': True, command: res}
return VIRT_SUCCESS, res
res = getattr(v, command)(name)
if not isinstance(res, dict):
- res = { command: res }
+ res = {command: res}
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if not isinstance(res, dict):
- res = { command: res }
+ res = {command: res}
return VIRT_SUCCESS, res
else:
@@ -654,7 +654,7 @@ def core(module):
if autostart is not None:
if not name:
- module.fail_json(msg = "state change requires a specified name")
+ module.fail_json(msg="state change requires a specified name")
res['changed'] = False
if autostart:
@@ -673,17 +673,17 @@ def core(module):
def main():
- module = AnsibleModule (
- argument_spec = dict(
- name = dict(aliases=['pool']),
- state = dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
- command = dict(choices=ALL_COMMANDS),
- uri = dict(default='qemu:///system'),
- xml = dict(),
- autostart = dict(type='bool'),
- mode = dict(choices=ALL_MODES),
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pool']),
+ state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
+ command=dict(choices=ALL_COMMANDS),
+ uri=dict(default='qemu:///system'),
+ xml=dict(),
+ autostart=dict(type='bool'),
+ mode=dict(choices=ALL_MODES),
),
- supports_check_mode = True
+ supports_check_mode=True
)
if not HAS_VIRT:
@@ -702,7 +702,7 @@ def main():
except Exception as e:
module.fail_json(msg=str(e))
- if rc != 0: # something went wrong emit the msg
+ if rc != 0:  # something went wrong emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)
diff --git a/lib/ansible/modules/cloud/misc/xenserver_facts.py b/lib/ansible/modules/cloud/misc/xenserver_facts.py
index b1704f0c88..ba995381d7 100644
--- a/lib/ansible/modules/cloud/misc/xenserver_facts.py
+++ b/lib/ansible/modules/cloud/misc/xenserver_facts.py
@@ -137,12 +137,14 @@ def change_keys(recs, key='uuid', filter_func=None):
return new_recs
+
def get_host(session):
"""Get the host"""
host_recs = session.xenapi.host.get_all()
# We only have one host, so just return its entry
return session.xenapi.host.get_record(host_recs[0])
+
def get_vms(session):
xs_vms = {}
recs = session.xenapi.VM.get_all()
@@ -165,6 +167,7 @@ def get_srs(session):
xs_srs[sr['name_label']] = sr
return xs_srs
+
def main():
module = AnsibleModule({})
diff --git a/lib/ansible/modules/cloud/openstack/os_image.py b/lib/ansible/modules/cloud/openstack/os_image.py
index f9b6c7539f..faf090de3c 100644
--- a/lib/ansible/modules/cloud/openstack/os_image.py
+++ b/lib/ansible/modules/cloud/openstack/os_image.py
@@ -137,20 +137,20 @@ from ansible.module_utils.openstack import openstack_full_argument_spec, opensta
def main():
argument_spec = openstack_full_argument_spec(
- name = dict(required=True),
- id = dict(default=None),
- checksum = dict(default=None),
- disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
- container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
- owner = dict(default=None),
- min_disk = dict(type='int', default=0),
- min_ram = dict(type='int', default=0),
- is_public = dict(type='bool', default=False),
- filename = dict(default=None),
- ramdisk = dict(default=None),
- kernel = dict(default=None),
- properties = dict(type='dict', default={}),
- state = dict(default='present', choices=['absent', 'present']),
+ name=dict(required=True),
+ id=dict(default=None),
+ checksum=dict(default=None),
+ disk_format=dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'vhdx', 'ploop']),
+ container_format=dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova', 'docker']),
+ owner=dict(default=None),
+ min_disk=dict(type='int', default=0),
+ min_ram=dict(type='int', default=0),
+ is_public=dict(type='bool', default=False),
+ filename=dict(default=None),
+ ramdisk=dict(default=None),
+ kernel=dict(default=None),
+ properties=dict(type='dict', default={}),
+ state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
@@ -163,13 +163,13 @@ def main():
changed = False
if module.params['checksum']:
- image = cloud.get_image(name_or_id=None,filters={'checksum': module.params['checksum']})
+ image = cloud.get_image(name_or_id=None, filters={'checksum': module.params['checksum']})
else:
image = cloud.get_image(name_or_id=module.params['name'])
if module.params['state'] == 'present':
if not image:
- kwargs={}
+ kwargs = {}
if module.params['id'] is not None:
kwargs['id'] = module.params['id']
image = cloud.create_image(
diff --git a/lib/ansible/modules/cloud/openstack/os_keypair.py b/lib/ansible/modules/cloud/openstack/os_keypair.py
index 8b4e8db0b5..bc9eff7dfe 100644
--- a/lib/ansible/modules/cloud/openstack/os_keypair.py
+++ b/lib/ansible/modules/cloud/openstack/os_keypair.py
@@ -109,11 +109,11 @@ def _system_state_change(module, keypair):
def main():
argument_spec = openstack_full_argument_spec(
- name = dict(required=True),
- public_key = dict(default=None),
- public_key_file = dict(default=None),
- state = dict(default='present',
- choices=['absent', 'present']),
+ name=dict(required=True),
+ public_key=dict(default=None),
+ public_key_file=dict(default=None),
+ state=dict(default='present',
+ choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
diff --git a/lib/ansible/modules/cloud/openstack/os_nova_flavor.py b/lib/ansible/modules/cloud/openstack/os_nova_flavor.py
index 03a2f646b7..6d2d5f6ebc 100644
--- a/lib/ansible/modules/cloud/openstack/os_nova_flavor.py
+++ b/lib/ansible/modules/cloud/openstack/os_nova_flavor.py
@@ -194,21 +194,21 @@ def _system_state_change(module, flavor):
def main():
argument_spec = openstack_full_argument_spec(
- state = dict(required=False, default='present',
- choices=['absent', 'present']),
- name = dict(required=False),
+ state=dict(required=False, default='present',
+ choices=['absent', 'present']),
+ name=dict(required=False),
# required when state is 'present'
- ram = dict(required=False, type='int'),
- vcpus = dict(required=False, type='int'),
- disk = dict(required=False, type='int'),
-
- ephemeral = dict(required=False, default=0, type='int'),
- swap = dict(required=False, default=0, type='int'),
- rxtx_factor = dict(required=False, default=1.0, type='float'),
- is_public = dict(required=False, default=True, type='bool'),
- flavorid = dict(required=False, default="auto"),
- extra_specs = dict(required=False, default=None, type='dict'),
+ ram=dict(required=False, type='int'),
+ vcpus=dict(required=False, type='int'),
+ disk=dict(required=False, type='int'),
+
+ ephemeral=dict(required=False, default=0, type='int'),
+ swap=dict(required=False, default=0, type='int'),
+ rxtx_factor=dict(required=False, default=1.0, type='float'),
+ is_public=dict(required=False, default=True, type='bool'),
+ flavorid=dict(required=False, default="auto"),
+ extra_specs=dict(required=False, default=None, type='dict'),
)
module_kwargs = openstack_module_kwargs()
@@ -247,9 +247,9 @@ def main():
rxtx_factor=module.params['rxtx_factor'],
is_public=module.params['is_public']
)
- changed=True
+ changed = True
else:
- changed=False
+ changed = False
old_extra_specs = flavor['extra_specs']
new_extra_specs = dict([(k, str(v)) for k, v in extra_specs.items()])
diff --git a/lib/ansible/modules/cloud/openstack/os_quota.py b/lib/ansible/modules/cloud/openstack/os_quota.py
index 7a6a6c1b54..c319b12ddb 100644
--- a/lib/ansible/modules/cloud/openstack/os_quota.py
+++ b/lib/ansible/modules/cloud/openstack/os_quota.py
@@ -306,14 +306,17 @@ def _get_volume_quotas(cloud, project):
return cloud.get_volume_quotas(project)
+
def _get_network_quotas(cloud, project):
return cloud.get_network_quotas(project)
+
def _get_compute_quotas(cloud, project):
return cloud.get_compute_quotas(project)
+
def _get_quotas(module, cloud, project):
quota = {}
@@ -334,6 +337,7 @@ def _get_quotas(module, cloud, project):
return quota
+
def _scrub_results(quota):
filter_attr = [
@@ -350,6 +354,7 @@ def _scrub_results(quota):
return quota
+
def _system_state_change_details(module, project_quota_output):
quota_change_request = {}
@@ -368,6 +373,7 @@ def _system_state_change_details(module, project_quota_output):
return (changes_required, quota_change_request)
+
def _system_state_change(module, project_quota_output):
"""
Determine if changes are required to the current project quota.
@@ -386,6 +392,7 @@ def _system_state_change(module, project_quota_output):
else:
return False
+
def main():
argument_spec = openstack_full_argument_spec(
@@ -427,8 +434,8 @@ def main():
)
module = AnsibleModule(argument_spec,
- supports_check_mode=True
- )
+ supports_check_mode=True
+ )
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
@@ -437,7 +444,7 @@ def main():
cloud_params = dict(module.params)
cloud = shade.operator_cloud(**cloud_params)
- #In order to handle the different volume types we update module params after.
+ # In order to handle the different volume types we update module params after.
dynamic_types = [
'gigabytes_types',
'snapshots_types',
@@ -448,22 +455,22 @@ def main():
for k, v in module.params[dynamic_type].items():
module.params[k] = int(v)
- #Get current quota values
+ # Get current quota values
project_quota_output = _get_quotas(module, cloud, cloud_params['name'])
changes_required = False
if module.params['state'] == "absent":
- #If a quota state is set to absent we should assume there will be changes.
- #The default quota values are not accessible so we can not determine if
- #no changes will occur or not.
+ # If a quota state is set to absent we should assume there will be changes.
+ # The default quota values are not accessible so we can not determine if
+ # no changes will occur or not.
if module.check_mode:
module.exit_json(changed=True)
- #Calling delete_network_quotas when a quota has not been set results
- #in an error, according to the shade docs it should return the
- #current quota.
- #The following error string is returned:
- #network client call failed: Quota for tenant 69dd91d217e949f1a0b35a4b901741dc could not be found.
+ # Calling delete_network_quotas when a quota has not been set results
+ # in an error, according to the shade docs it should return the
+ # current quota.
+ # The following error string is returned:
+ # network client call failed: Quota for tenant 69dd91d217e949f1a0b35a4b901741dc could not be found.
neutron_msg1 = "network client call failed: Quota for tenant"
neutron_msg2 = "could not be found"
@@ -495,7 +502,7 @@ def main():
quota_call = getattr(cloud, 'set_%s_quotas' % (quota_type))
quota_call(cloud_params['name'], **quota_change_request[quota_type])
- #Get quota state post changes for validation
+ # Get quota state post changes for validation
project_quota_update = _get_quotas(module, cloud, cloud_params['name'])
if project_quota_output == project_quota_update:
@@ -504,8 +511,8 @@ def main():
project_quota_output = project_quota_update
module.exit_json(changed=changes_required,
- openstack_quotas=project_quota_output
- )
+ openstack_quotas=project_quota_output
+ )
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
diff --git a/lib/ansible/modules/cloud/openstack/os_security_group_rule.py b/lib/ansible/modules/cloud/openstack/os_security_group_rule.py
index 7a9d771cb8..e6c6fac150 100644
--- a/lib/ansible/modules/cloud/openstack/os_security_group_rule.py
+++ b/lib/ansible/modules/cloud/openstack/os_security_group_rule.py
@@ -212,8 +212,8 @@ def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
if ((module_min is None and module_max is None) and
(rule_min and int(rule_min) == 1 and
rule_max and int(rule_max) == 65535)):
- # (None, None) == (1, 65535)
- return True
+ # (None, None) == (1, 65535)
+ return True
# Sanity check to make sure we don't have type comparison issues.
if module_min:
@@ -270,21 +270,21 @@ def _system_state_change(module, secgroup, remotegroup):
def main():
argument_spec = openstack_full_argument_spec(
- security_group = dict(required=True),
+ security_group=dict(required=True),
# NOTE(Shrews): None is an acceptable protocol value for
# Neutron, but Nova will balk at this.
- protocol = dict(default=None,
- choices=[None, 'tcp', 'udp', 'icmp', '112']),
- port_range_min = dict(required=False, type='int'),
- port_range_max = dict(required=False, type='int'),
- remote_ip_prefix = dict(required=False, default=None),
- remote_group = dict(required=False, default=None),
- ethertype = dict(default='IPv4',
- choices=['IPv4', 'IPv6']),
- direction = dict(default='ingress',
- choices=['egress', 'ingress']),
- state = dict(default='present',
- choices=['absent', 'present']),
+ protocol=dict(default=None,
+ choices=[None, 'tcp', 'udp', 'icmp', '112']),
+ port_range_min=dict(required=False, type='int'),
+ port_range_max=dict(required=False, type='int'),
+ remote_ip_prefix=dict(required=False, default=None),
+ remote_group=dict(required=False, default=None),
+ ethertype=dict(default='IPv4',
+ choices=['IPv4', 'IPv6']),
+ direction=dict(default='ingress',
+ choices=['egress', 'ingress']),
+ state=dict(default='present',
+ choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
@@ -312,7 +312,7 @@ def main():
if remote_group:
remotegroup = cloud.get_security_group(remote_group)
else:
- remotegroup = { 'id' : None }
+ remotegroup = {'id': None}
if module.check_mode:
module.exit_json(changed=_system_state_change(module, secgroup, remotegroup))
diff --git a/lib/ansible/modules/cloud/openstack/os_server.py b/lib/ansible/modules/cloud/openstack/os_server.py
index 098e17959a..751546b745 100644
--- a/lib/ansible/modules/cloud/openstack/os_server.py
+++ b/lib/ansible/modules/cloud/openstack/os_server.py
@@ -433,6 +433,7 @@ def _parse_nics(nics):
else:
yield net
+
def _network_args(module, cloud):
args = []
nics = module.params['nics']
@@ -685,31 +686,31 @@ def _get_server_state(module, cloud):
def main():
argument_spec = openstack_full_argument_spec(
- name = dict(required=True),
- image = dict(default=None),
- image_exclude = dict(default='(deprecated)'),
- flavor = dict(default=None),
- flavor_ram = dict(default=None, type='int'),
- flavor_include = dict(default=None),
- key_name = dict(default=None),
- security_groups = dict(default=['default'], type='list'),
- network = dict(default=None),
- nics = dict(default=[], type='list'),
- meta = dict(default=None, type='raw'),
- userdata = dict(default=None, aliases=['user_data']),
- config_drive = dict(default=False, type='bool'),
- auto_ip = dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
- floating_ips = dict(default=None, type='list'),
- floating_ip_pools = dict(default=None, type='list'),
- volume_size = dict(default=False, type='int'),
- boot_from_volume = dict(default=False, type='bool'),
- boot_volume = dict(default=None, aliases=['root_volume']),
- terminate_volume = dict(default=False, type='bool'),
- volumes = dict(default=[], type='list'),
- scheduler_hints = dict(default=None, type='dict'),
- state = dict(default='present', choices=['absent', 'present']),
- delete_fip = dict(default=False, type='bool'),
- reuse_ips = dict(default=True, type='bool'),
+ name=dict(required=True),
+ image=dict(default=None),
+ image_exclude=dict(default='(deprecated)'),
+ flavor=dict(default=None),
+ flavor_ram=dict(default=None, type='int'),
+ flavor_include=dict(default=None),
+ key_name=dict(default=None),
+ security_groups=dict(default=['default'], type='list'),
+ network=dict(default=None),
+ nics=dict(default=[], type='list'),
+ meta=dict(default=None, type='raw'),
+ userdata=dict(default=None, aliases=['user_data']),
+ config_drive=dict(default=False, type='bool'),
+ auto_ip=dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
+ floating_ips=dict(default=None, type='list'),
+ floating_ip_pools=dict(default=None, type='list'),
+ volume_size=dict(default=False, type='int'),
+ boot_from_volume=dict(default=False, type='bool'),
+ boot_volume=dict(default=None, aliases=['root_volume']),
+ terminate_volume=dict(default=False, type='bool'),
+ volumes=dict(default=[], type='list'),
+ scheduler_hints=dict(default=None, type='dict'),
+ state=dict(default='present', choices=['absent', 'present']),
+ delete_fip=dict(default=False, type='bool'),
+ reuse_ips=dict(default=True, type='bool'),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
diff --git a/lib/ansible/modules/cloud/openstack/os_stack.py b/lib/ansible/modules/cloud/openstack/os_stack.py
index 5f2f9fff5b..e250b5bde8 100644
--- a/lib/ansible/modules/cloud/openstack/os_stack.py
+++ b/lib/ansible/modules/cloud/openstack/os_stack.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-#coding: utf-8 -*-
+# coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# (c) 2016, Steve Baker <sbaker@redhat.com>
@@ -166,12 +166,12 @@ from ansible.module_utils.openstack import openstack_full_argument_spec, opensta
def _create_stack(module, stack, cloud):
try:
stack = cloud.create_stack(module.params['name'],
- template_file=module.params['template'],
- environment_files=module.params['environment'],
- timeout=module.params['timeout'],
- wait=True,
- rollback=module.params['rollback'],
- **module.params['parameters'])
+ template_file=module.params['template'],
+ environment_files=module.params['environment'],
+ timeout=module.params['timeout'],
+ wait=True,
+ rollback=module.params['rollback'],
+ **module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
@@ -181,6 +181,7 @@ def _create_stack(module, stack, cloud):
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
+
def _update_stack(module, stack, cloud):
try:
stack = cloud.update_stack(
@@ -195,11 +196,12 @@ def _update_stack(module, stack, cloud):
if stack['stack_status'] == 'UPDATE_COMPLETE':
return stack
else:
- module.fail_json(msg = "Failure in updating stack: %s" %
+ module.fail_json(msg="Failure in updating stack: %s" %
stack['stack_status_reason'])
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
+
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
@@ -209,6 +211,7 @@ def _system_state_change(module, stack, cloud):
return True
return False
+
def main():
argument_spec = openstack_full_argument_spec(
diff --git a/lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
index 7774549b27..7353e555c8 100644
--- a/lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
+++ b/lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
@@ -162,7 +162,7 @@ def main():
if not HAS_OVH:
module.fail_json(msg='ovh-api python module'
- 'is required to run this module ')
+ 'is required to run this module ')
# Get parameters
name = module.params.get('name')
diff --git a/lib/ansible/modules/cloud/packet/packet_sshkey.py b/lib/ansible/modules/cloud/packet/packet_sshkey.py
index 8b1f076e9c..f649e62fee 100644
--- a/lib/ansible/modules/cloud/packet/packet_sshkey.py
+++ b/lib/ansible/modules/cloud/packet/packet_sshkey.py
@@ -113,7 +113,7 @@ PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
def serialize_sshkey(sshkey):
sshkey_data = {}
- copy_keys = ['id', 'key', 'label','fingerprint']
+ copy_keys = ['id', 'key', 'label', 'fingerprint']
for name in copy_keys:
sshkey_data[name] = getattr(sshkey, name)
return sshkey_data
@@ -132,7 +132,7 @@ def load_key_string(key_str):
key_str = key_str.strip()
ret_dict['key'] = key_str
cut_key = key_str.split()
- if len(cut_key) in [2,3]:
+ if len(cut_key) in [2, 3]:
if len(cut_key) == 3:
ret_dict['label'] = cut_key[2]
else:
@@ -165,7 +165,7 @@ def get_sshkey_selector(module):
return k.key == select_dict['key']
else:
# if key string not specified, all the fields must match
- return all([select_dict[f] == getattr(k,f) for f in select_dict])
+ return all([select_dict[f] == getattr(k, f) for f in select_dict])
return selector
@@ -188,10 +188,10 @@ def act_on_sshkeys(target_state, module, packet_conn):
newkey['label'] = module.params.get('label')
for param in ('label', 'key'):
if param not in newkey:
- _msg=("If you want to ensure a key is present, you must "
- "supply both a label and a key string, either in "
- "module params, or in a key file. %s is missing"
- % param)
+ _msg = ("If you want to ensure a key is present, you must "
+ "supply both a label and a key string, either in "
+ "module params, or in a key file. %s is missing"
+ % param)
raise Exception(_msg)
matching_sshkeys = []
new_key_response = packet_conn.create_ssh_key(
@@ -208,7 +208,7 @@ def act_on_sshkeys(target_state, module, packet_conn):
except Exception as e:
_msg = ("while trying to remove sshkey %s, id %s %s, "
"got error: %s" %
- (k.label, k.id, target_state, e))
+ (k.label, k.id, target_state, e))
raise Exception(_msg)
return {
@@ -220,7 +220,7 @@ def act_on_sshkeys(target_state, module, packet_conn):
def main():
module = AnsibleModule(
argument_spec=dict(
- state = dict(choices=['present', 'absent'], default='present'),
+ state=dict(choices=['present', 'absent'], default='present'),
auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
no_log=True),
label=dict(type='str', aliases=['name'], default=None),
@@ -236,16 +236,16 @@ def main():
('key', 'fingerprint'),
('key', 'id'),
('key_file', 'key'),
- ]
+ ]
)
if not HAS_PACKET_SDK:
module.fail_json(msg='packet required for this module')
if not module.params.get('auth_token'):
- _fail_msg = ( "if Packet API token is not in environment variable %s, "
- "the auth_token parameter is required" %
- PACKET_API_TOKEN_ENV_VAR)
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
module.fail_json(msg=_fail_msg)
auth_token = module.params.get('auth_token')
@@ -254,7 +254,7 @@ def main():
state = module.params.get('state')
- if state in ['present','absent']:
+ if state in ['present', 'absent']:
try:
module.exit_json(**act_on_sshkeys(state, module, packet_conn))
except Exception as e:
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks.py b/lib/ansible/modules/cloud/profitbricks/profitbricks.py
index d965167d4b..cd11f57cb1 100644
--- a/lib/ansible/modules/cloud/profitbricks/profitbricks.py
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks.py
@@ -238,7 +238,7 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
- ) + '" to complete.')
+ ) + '" to complete.')
def _create_machine(module, profitbricks, datacenter, name):
@@ -290,7 +290,7 @@ def _create_machine(module, profitbricks, datacenter, name):
n = NIC(
lan=int(lan)
- )
+ )
s = Server(
name=name,
@@ -299,7 +299,7 @@ def _create_machine(module, profitbricks, datacenter, name):
cpu_family=cpu_family,
create_volumes=[v],
nics=[n],
- )
+ )
try:
create_server_response = profitbricks.create_server(
@@ -341,7 +341,7 @@ def _create_datacenter(module, profitbricks):
i = Datacenter(
name=datacenter,
location=location
- )
+ )
try:
datacenter_response = profitbricks.create_datacenter(datacenter=i)
@@ -624,7 +624,7 @@ def main():
if state == 'absent':
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required ' +
- 'for running or stopping machines.')
+ 'for running or stopping machines.')
try:
(changed) = remove_virtual_machine(module, profitbricks)
@@ -635,7 +635,7 @@ def main():
elif state in ('running', 'stopped'):
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required for ' +
- 'running or stopping machines.')
+ 'running or stopping machines.')
try:
(changed) = startstop_machine(module, profitbricks, state)
module.exit_json(changed=changed)
@@ -649,10 +649,10 @@ def main():
module.fail_json(msg='image parameter is required for new instance')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is ' +
- 'required for new instance')
+ 'required for new instance')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is ' +
- 'required for new instance')
+ 'required for new instance')
try:
(machine_dict_array) = create_virtual_machine(module, profitbricks)
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py b/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
index bebff23c8c..e320544379 100644
--- a/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
@@ -118,7 +118,8 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
- ) + '" to complete.')
+ ) + '" to complete.')
+
def _remove_datacenter(module, profitbricks, datacenter):
try:
@@ -126,6 +127,7 @@ def _remove_datacenter(module, profitbricks, datacenter):
except Exception as e:
module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
+
def create_datacenter(module, profitbricks):
"""
Creates a Datacenter
@@ -148,7 +150,7 @@ def create_datacenter(module, profitbricks):
name=name,
location=location,
description=description
- )
+ )
try:
datacenter_response = profitbricks.create_datacenter(datacenter=i)
@@ -166,6 +168,7 @@ def create_datacenter(module, profitbricks):
except Exception as e:
module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
def remove_datacenter(module, profitbricks):
"""
Removes a Datacenter.
@@ -197,6 +200,7 @@ def remove_datacenter(module, profitbricks):
return changed
+
def main():
module = AnsibleModule(
argument_spec=dict(
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py b/lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py
index ccf863a659..5d01618b3e 100644
--- a/lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py
@@ -121,7 +121,8 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
- ) + '" to complete.')
+ ) + '" to complete.')
+
def create_nic(module, profitbricks):
"""
@@ -173,6 +174,7 @@ def create_nic(module, profitbricks):
except Exception as e:
module.fail_json(msg="failed to create the NIC: %s" % str(e))
+
def delete_nic(module, profitbricks):
"""
Removes a NIC
@@ -228,12 +230,13 @@ def delete_nic(module, profitbricks):
except Exception as e:
module.fail_json(msg="failed to remove the NIC: %s" % str(e))
+
def main():
module = AnsibleModule(
argument_spec=dict(
datacenter=dict(),
server=dict(),
- name=dict(default=str(uuid.uuid4()).replace('-','')[:10]),
+ name=dict(default=str(uuid.uuid4()).replace('-', '')[:10]),
lan=dict(),
subscription_user=dict(),
subscription_password=dict(no_log=True),
@@ -255,7 +258,6 @@ def main():
if not module.params.get('server'):
module.fail_json(msg='server parameter is required')
-
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py b/lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py
index 1d1980e078..c65939e7fa 100644
--- a/lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py
@@ -170,7 +170,7 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
- ) + '" to complete.')
+ ) + '" to complete.')
def _create_volume(module, profitbricks, datacenter, name):
@@ -194,7 +194,7 @@ def _create_volume(module, profitbricks, datacenter, name):
ssh_keys=ssh_keys,
disk_type=disk_type,
licence_type=licence_type
- )
+ )
volume_response = profitbricks.create_volume(datacenter, v)
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py
index 9a7bfec3bf..581e76a1ff 100644
--- a/lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py
@@ -118,7 +118,8 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
- ) + '" to complete.')
+ ) + '" to complete.')
+
def attach_volume(module, profitbricks):
"""
@@ -150,7 +151,7 @@ def attach_volume(module, profitbricks):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
- server= s['id']
+ server = s['id']
break
# Locate UUID for Volume
@@ -163,6 +164,7 @@ def attach_volume(module, profitbricks):
return profitbricks.attach_volume(datacenter, server, volume)
+
def detach_volume(module, profitbricks):
"""
Detaches a volume.
@@ -193,7 +195,7 @@ def detach_volume(module, profitbricks):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
- server= s['id']
+ server = s['id']
break
# Locate UUID for Volume
@@ -206,6 +208,7 @@ def detach_volume(module, profitbricks):
return profitbricks.detach_volume(datacenter, server, volume)
+
def main():
module = AnsibleModule(
argument_spec=dict(
diff --git a/lib/ansible/modules/cloud/rackspace/rax_cbs_attachments.py b/lib/ansible/modules/cloud/rackspace/rax_cbs_attachments.py
index 9bafd5e591..7cb1abe398 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_cbs_attachments.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_cbs_attachments.py
@@ -93,7 +93,7 @@ from ansible.module_utils.rax import (NON_CALLABLES,
rax_required_together,
rax_to_dict,
setup_rax_module,
- )
+ )
def cloud_block_storage_attachments(module, state, volume, server, device,
diff --git a/lib/ansible/modules/cloud/rackspace/rax_cdb.py b/lib/ansible/modules/cloud/rackspace/rax_cdb.py
index eb03328ffb..ced4369676 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_cdb.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_cdb.py
@@ -148,7 +148,7 @@ def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
module.fail_json(changed=False, action=action,
msg='The new volume size must be larger than '
'the current volume size',
- cdb=rax_to_dict(instance))
+ cdb=rax_to_dict(instance))
instance.resize_volume(volume)
changed = True
diff --git a/lib/ansible/modules/cloud/rackspace/rax_clb.py b/lib/ansible/modules/cloud/rackspace/rax_clb.py
index 86b750edf7..f00bd5e63e 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_clb.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_clb.py
@@ -141,7 +141,7 @@ from ansible.module_utils.rax import (CLB_ALGORITHMS,
rax_required_together,
rax_to_dict,
setup_rax_module,
- )
+ )
def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
diff --git a/lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py b/lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py
index d6f89417c2..6dfee78817 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py
@@ -11,7 +11,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community'}
-DOCUMENTATION='''
+DOCUMENTATION = '''
module: rax_clb_ssl
short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
description:
@@ -100,7 +100,8 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
- )
+ )
+
def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
certificate, intermediate_certificate, secure_port,
@@ -222,6 +223,7 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
else:
module.fail_json(**result)
+
def main():
argument_spec = rax_argument_spec()
argument_spec.update(dict(
diff --git a/lib/ansible/modules/cloud/rackspace/rax_dns.py b/lib/ansible/modules/cloud/rackspace/rax_dns.py
index 76ce9ec625..1a0aaba6f6 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_dns.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_dns.py
@@ -72,7 +72,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
- )
+ )
def rax_dns(module, comment, email, name, state, ttl):
diff --git a/lib/ansible/modules/cloud/rackspace/rax_dns_record.py b/lib/ansible/modules/cloud/rackspace/rax_dns_record.py
index 6c04d68c9d..6ed9b4fdc8 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_dns_record.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_dns_record.py
@@ -128,7 +128,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
- )
+ )
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
diff --git a/lib/ansible/modules/cloud/rackspace/rax_facts.py b/lib/ansible/modules/cloud/rackspace/rax_facts.py
index 42d8201691..4c40a550a0 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_facts.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_facts.py
@@ -61,7 +61,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
- )
+ )
def rax_facts(module, address, name, server_id):
diff --git a/lib/ansible/modules/cloud/rackspace/rax_keypair.py b/lib/ansible/modules/cloud/rackspace/rax_keypair.py
index 57988a75e5..0a6010662c 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_keypair.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_keypair.py
@@ -91,7 +91,7 @@ from ansible.module_utils.rax import (rax_argument_spec,
rax_required_together,
rax_to_dict,
setup_rax_module,
- )
+ )
def rax_keypair(module, name, public_key, state):
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py b/lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py
index 996f5f849d..04e5db258f 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py
@@ -180,6 +180,7 @@ def alarm(module, state, label, entity_id, check_id, notification_plan_id, crite
else:
module.exit_json(changed=changed)
+
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_check.py b/lib/ansible/modules/cloud/rackspace/rax_mon_check.py
index 48a5156b99..a6e791db96 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_mon_check.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_check.py
@@ -256,6 +256,7 @@ def cloud_check(module, state, entity_id, label, check_type,
else:
module.exit_json(changed=changed)
+
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_entity.py b/lib/ansible/modules/cloud/rackspace/rax_mon_entity.py
index 0ed3aac18f..21d917c301 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_mon_entity.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_entity.py
@@ -152,6 +152,7 @@ def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
else:
module.exit_json(changed=changed)
+
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_notification.py b/lib/ansible/modules/cloud/rackspace/rax_mon_notification.py
index d4da95b756..6aee351b96 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_mon_notification.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_notification.py
@@ -138,6 +138,7 @@ def notification(module, state, label, notification_type, details):
else:
module.exit_json(changed=changed)
+
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py b/lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py
index fb5822aa6e..486599bcbd 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py
@@ -141,6 +141,7 @@ def notification_plan(module, state, label, critical_state, warning_state, ok_st
else:
module.exit_json(changed=changed)
+
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
diff --git a/lib/ansible/modules/cloud/univention/udm_dns_record.py b/lib/ansible/modules/cloud/univention/udm_dns_record.py
index b7916bb7fb..5ad7b90b44 100644
--- a/lib/ansible/modules/cloud/univention/udm_dns_record.py
+++ b/lib/ansible/modules/cloud/univention/udm_dns_record.py
@@ -94,21 +94,21 @@ from ansible.module_utils.univention_umc import (
def main():
module = AnsibleModule(
- argument_spec = dict(
- type = dict(required=True,
- type='str'),
- zone = dict(required=True,
- type='str'),
- name = dict(required=True,
- type='str'),
- data = dict(default=[],
- type='dict'),
- state = dict(default='present',
- choices=['present', 'absent'],
- type='str')
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ type='str'),
+ name=dict(required=True,
+ type='str'),
+ data=dict(default=[],
+ type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
),
supports_check_mode=True,
- required_if = ([
+ required_if=([
('state', 'present', ['data'])
])
)
@@ -116,12 +116,12 @@ def main():
if not HAVE_UNIVENTION:
module.fail_json(msg="This module requires univention python bindings")
- type = module.params['type']
- zone = module.params['zone']
- name = module.params['name']
- data = module.params['data']
- state = module.params['state']
- changed = False
+ type = module.params['type']
+ zone = module.params['zone']
+ name = module.params['name']
+ data = module.params['data']
+ state = module.params['state']
+ changed = False
obj = list(ldap_search(
'(&(objectClass=dNSZone)(zoneName={})(relativeDomainName={}))'.format(zone, name),
diff --git a/lib/ansible/modules/cloud/univention/udm_dns_zone.py b/lib/ansible/modules/cloud/univention/udm_dns_zone.py
index 8d167ff8aa..5b3069f4bd 100644
--- a/lib/ansible/modules/cloud/univention/udm_dns_zone.py
+++ b/lib/ansible/modules/cloud/univention/udm_dns_zone.py
@@ -111,10 +111,10 @@ from ansible.module_utils.univention_umc import (
def convert_time(time):
"""Convert a time in seconds into the biggest unit"""
units = [
- (24 * 60 * 60 , 'days'),
- (60 * 60 , 'hours'),
- (60 , 'minutes'),
- (1 , 'seconds'),
+ (24 * 60 * 60, 'days'),
+ (60 * 60, 'hours'),
+ (60, 'minutes'),
+ (1, 'seconds'),
]
if time == 0:
@@ -126,49 +126,49 @@ def convert_time(time):
def main():
module = AnsibleModule(
- argument_spec = dict(
- type = dict(required=True,
- type='str'),
- zone = dict(required=True,
- aliases=['name'],
- type='str'),
- nameserver = dict(default=[],
- type='list'),
- interfaces = dict(default=[],
- type='list'),
- refresh = dict(default=3600,
- type='int'),
- retry = dict(default=1800,
- type='int'),
- expire = dict(default=604800,
- type='int'),
- ttl = dict(default=600,
- type='int'),
- contact = dict(default='',
- type='str'),
- mx = dict(default=[],
- type='list'),
- state = dict(default='present',
- choices=['present', 'absent'],
- type='str')
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ nameserver=dict(default=[],
+ type='list'),
+ interfaces=dict(default=[],
+ type='list'),
+ refresh=dict(default=3600,
+ type='int'),
+ retry=dict(default=1800,
+ type='int'),
+ expire=dict(default=604800,
+ type='int'),
+ ttl=dict(default=600,
+ type='int'),
+ contact=dict(default='',
+ type='str'),
+ mx=dict(default=[],
+ type='list'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
),
supports_check_mode=True,
- required_if = ([
+ required_if=([
('state', 'present', ['nameserver', 'interfaces'])
])
)
- type = module.params['type']
- zone = module.params['zone']
- nameserver = module.params['nameserver']
- interfaces = module.params['interfaces']
- refresh = module.params['refresh']
- retry = module.params['retry']
- expire = module.params['expire']
- ttl = module.params['ttl']
- contact = module.params['contact']
- mx = module.params['mx']
- state = module.params['state']
- changed = False
+ type = module.params['type']
+ zone = module.params['zone']
+ nameserver = module.params['nameserver']
+ interfaces = module.params['interfaces']
+ refresh = module.params['refresh']
+ retry = module.params['retry']
+ expire = module.params['expire']
+ ttl = module.params['ttl']
+ contact = module.params['contact']
+ mx = module.params['mx']
+ state = module.params['state']
+ changed = False
obj = list(ldap_search(
'(&(objectClass=dNSZone)(zoneName={}))'.format(zone),
@@ -187,15 +187,15 @@ def main():
obj = umc_module_for_add('dns/{}'.format(type), container)
else:
obj = umc_module_for_edit('dns/{}'.format(type), dn)
- obj['zone'] = zone
- obj['nameserver'] = nameserver
- obj['a'] = interfaces
- obj['refresh'] = convert_time(refresh)
- obj['retry'] = convert_time(retry)
- obj['expire'] = convert_time(expire)
- obj['ttl'] = convert_time(ttl)
- obj['contact'] = contact
- obj['mx'] = mx
+ obj['zone'] = zone
+ obj['nameserver'] = nameserver
+ obj['a'] = interfaces
+ obj['refresh'] = convert_time(refresh)
+ obj['retry'] = convert_time(retry)
+ obj['expire'] = convert_time(expire)
+ obj['ttl'] = convert_time(ttl)
+ obj['contact'] = contact
+ obj['mx'] = mx
diff = obj.diff()
if exists:
for k in obj.keys():
diff --git a/lib/ansible/modules/cloud/univention/udm_group.py b/lib/ansible/modules/cloud/univention/udm_group.py
index be3bb21e59..7e6564591e 100644
--- a/lib/ansible/modules/cloud/univention/udm_group.py
+++ b/lib/ansible/modules/cloud/univention/udm_group.py
@@ -87,30 +87,30 @@ from ansible.module_utils.univention_umc import (
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True,
- type='str'),
- description = dict(default=None,
- type='str'),
- position = dict(default='',
- type='str'),
- ou = dict(default='',
- type='str'),
- subpath = dict(default='cn=groups',
- type='str'),
- state = dict(default='present',
- choices=['present', 'absent'],
- type='str')
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ description=dict(default=None,
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=groups',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
),
supports_check_mode=True
)
- name = module.params['name']
+ name = module.params['name']
description = module.params['description']
- position = module.params['position']
- ou = module.params['ou']
- subpath = module.params['subpath']
- state = module.params['state']
- changed = False
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
groups = list(ldap_search(
'(&(objectClass=posixGroup)(cn={}))'.format(name),
@@ -134,8 +134,8 @@ def main():
grp = umc_module_for_add('groups/group', container)
else:
grp = umc_module_for_edit('groups/group', group_dn)
- grp['name'] = name
- grp['description'] = description
+ grp['name'] = name
+ grp['description'] = description
diff = grp.diff()
changed = grp.diff() != []
if not module.check_mode:
diff --git a/lib/ansible/modules/cloud/univention/udm_share.py b/lib/ansible/modules/cloud/univention/udm_share.py
index 4c9abfb95c..68bcad5667 100644
--- a/lib/ansible/modules/cloud/univention/udm_share.py
+++ b/lib/ansible/modules/cloud/univention/udm_share.py
@@ -380,162 +380,162 @@ from ansible.module_utils.univention_umc import (
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True,
- type='str'),
- ou = dict(required=True,
- type='str'),
- owner = dict(type='str',
- default='0'),
- group = dict(type='str',
- default='0'),
- path = dict(type='path',
- default=None),
- directorymode = dict(type='str',
- default='00755'),
- host = dict(type='str',
- default=None),
- root_squash = dict(type='bool',
- default=True),
- subtree_checking = dict(type='bool',
- default=True),
- sync = dict(type='str',
- default='sync'),
- writeable = dict(type='bool',
- default=True),
- sambaBlockSize = dict(type='str',
- aliases=['samba_block_size'],
- default=None),
- sambaBlockingLocks = dict(type='bool',
- aliases=['samba_blocking_locks'],
- default=True),
- sambaBrowseable = dict(type='bool',
- aliases=['samba_browsable'],
- default=True),
- sambaCreateMode = dict(type='str',
- aliases=['samba_create_mode'],
- default='0744'),
- sambaCscPolicy = dict(type='str',
- aliases=['samba_csc_policy'],
- default='manual'),
- sambaCustomSettings = dict(type='list',
- aliases=['samba_custom_settings'],
- default=[]),
- sambaDirectoryMode = dict(type='str',
- aliases=['samba_directory_mode'],
- default='0755'),
- sambaDirectorySecurityMode = dict(type='str',
- aliases=['samba_directory_security_mode'],
- default='0777'),
- sambaDosFilemode = dict(type='bool',
- aliases=['samba_dos_filemode'],
- default=False),
- sambaFakeOplocks = dict(type='bool',
- aliases=['samba_fake_oplocks'],
- default=False),
- sambaForceCreateMode = dict(type='bool',
- aliases=['samba_force_create_mode'],
- default=False),
- sambaForceDirectoryMode = dict(type='bool',
- aliases=['samba_force_directory_mode'],
- default=False),
- sambaForceDirectorySecurityMode = dict(type='bool',
- aliases=['samba_force_directory_security_mode'],
- default=False),
- sambaForceGroup = dict(type='str',
- aliases=['samba_force_group'],
- default=None),
- sambaForceSecurityMode = dict(type='bool',
- aliases=['samba_force_security_mode'],
- default=False),
- sambaForceUser = dict(type='str',
- aliases=['samba_force_user'],
- default=None),
- sambaHideFiles = dict(type='str',
- aliases=['samba_hide_files'],
- default=None),
- sambaHideUnreadable = dict(type='bool',
- aliases=['samba_hide_unreadable'],
- default=False),
- sambaHostsAllow = dict(type='list',
- aliases=['samba_hosts_allow'],
- default=[]),
- sambaHostsDeny = dict(type='list',
- aliases=['samba_hosts_deny'],
- default=[]),
- sambaInheritAcls = dict(type='bool',
- aliases=['samba_inherit_acls'],
- default=True),
- sambaInheritOwner = dict(type='bool',
- aliases=['samba_inherit_owner'],
- default=False),
- sambaInheritPermissions = dict(type='bool',
- aliases=['samba_inherit_permissions'],
- default=False),
- sambaInvalidUsers = dict(type='str',
- aliases=['samba_invalid_users'],
- default=None),
- sambaLevel2Oplocks = dict(type='bool',
- aliases=['samba_level_2_oplocks'],
- default=True),
- sambaLocking = dict(type='bool',
- aliases=['samba_locking'],
- default=True),
- sambaMSDFSRoot = dict(type='bool',
- aliases=['samba_msdfs_root'],
- default=False),
- sambaName = dict(type='str',
- aliases=['samba_name'],
- default=None),
- sambaNtAclSupport = dict(type='bool',
- aliases=['samba_nt_acl_support'],
- default=True),
- sambaOplocks = dict(type='bool',
- aliases=['samba_oplocks'],
- default=True),
- sambaPostexec = dict(type='str',
- aliases=['samba_postexec'],
- default=None),
- sambaPreexec = dict(type='str',
- aliases=['samba_preexec'],
- default=None),
- sambaPublic = dict(type='bool',
- aliases=['samba_public'],
- default=False),
- sambaSecurityMode = dict(type='str',
- aliases=['samba_security_mode'],
- default='0777'),
- sambaStrictLocking = dict(type='str',
- aliases=['samba_strict_locking'],
- default='Auto'),
- sambaVFSObjects = dict(type='str',
- aliases=['samba_vfs_objects'],
- default=None),
- sambaValidUsers = dict(type='str',
- aliases=['samba_valid_users'],
- default=None),
- sambaWriteList = dict(type='str',
- aliases=['samba_write_list'],
- default=None),
- sambaWriteable = dict(type='bool',
- aliases=['samba_writeable'],
- default=True),
- nfs_hosts = dict(type='list',
- default=[]),
- nfsCustomSettings = dict(type='list',
- aliases=['nfs_custom_settings'],
- default=[]),
- state = dict(default='present',
- choices=['present', 'absent'],
- type='str')
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ ou=dict(required=True,
+ type='str'),
+ owner=dict(type='str',
+ default='0'),
+ group=dict(type='str',
+ default='0'),
+ path=dict(type='path',
+ default=None),
+ directorymode=dict(type='str',
+ default='00755'),
+ host=dict(type='str',
+ default=None),
+ root_squash=dict(type='bool',
+ default=True),
+ subtree_checking=dict(type='bool',
+ default=True),
+ sync=dict(type='str',
+ default='sync'),
+ writeable=dict(type='bool',
+ default=True),
+ sambaBlockSize=dict(type='str',
+ aliases=['samba_block_size'],
+ default=None),
+ sambaBlockingLocks=dict(type='bool',
+ aliases=['samba_blocking_locks'],
+ default=True),
+ sambaBrowseable=dict(type='bool',
+ aliases=['samba_browsable'],
+ default=True),
+ sambaCreateMode=dict(type='str',
+ aliases=['samba_create_mode'],
+ default='0744'),
+ sambaCscPolicy=dict(type='str',
+ aliases=['samba_csc_policy'],
+ default='manual'),
+ sambaCustomSettings=dict(type='list',
+ aliases=['samba_custom_settings'],
+ default=[]),
+ sambaDirectoryMode=dict(type='str',
+ aliases=['samba_directory_mode'],
+ default='0755'),
+ sambaDirectorySecurityMode=dict(type='str',
+ aliases=['samba_directory_security_mode'],
+ default='0777'),
+ sambaDosFilemode=dict(type='bool',
+ aliases=['samba_dos_filemode'],
+ default=False),
+ sambaFakeOplocks=dict(type='bool',
+ aliases=['samba_fake_oplocks'],
+ default=False),
+ sambaForceCreateMode=dict(type='bool',
+ aliases=['samba_force_create_mode'],
+ default=False),
+ sambaForceDirectoryMode=dict(type='bool',
+ aliases=['samba_force_directory_mode'],
+ default=False),
+ sambaForceDirectorySecurityMode=dict(type='bool',
+ aliases=['samba_force_directory_security_mode'],
+ default=False),
+ sambaForceGroup=dict(type='str',
+ aliases=['samba_force_group'],
+ default=None),
+ sambaForceSecurityMode=dict(type='bool',
+ aliases=['samba_force_security_mode'],
+ default=False),
+ sambaForceUser=dict(type='str',
+ aliases=['samba_force_user'],
+ default=None),
+ sambaHideFiles=dict(type='str',
+ aliases=['samba_hide_files'],
+ default=None),
+ sambaHideUnreadable=dict(type='bool',
+ aliases=['samba_hide_unreadable'],
+ default=False),
+ sambaHostsAllow=dict(type='list',
+ aliases=['samba_hosts_allow'],
+ default=[]),
+ sambaHostsDeny=dict(type='list',
+ aliases=['samba_hosts_deny'],
+ default=[]),
+ sambaInheritAcls=dict(type='bool',
+ aliases=['samba_inherit_acls'],
+ default=True),
+ sambaInheritOwner=dict(type='bool',
+ aliases=['samba_inherit_owner'],
+ default=False),
+ sambaInheritPermissions=dict(type='bool',
+ aliases=['samba_inherit_permissions'],
+ default=False),
+ sambaInvalidUsers=dict(type='str',
+ aliases=['samba_invalid_users'],
+ default=None),
+ sambaLevel2Oplocks=dict(type='bool',
+ aliases=['samba_level_2_oplocks'],
+ default=True),
+ sambaLocking=dict(type='bool',
+ aliases=['samba_locking'],
+ default=True),
+ sambaMSDFSRoot=dict(type='bool',
+ aliases=['samba_msdfs_root'],
+ default=False),
+ sambaName=dict(type='str',
+ aliases=['samba_name'],
+ default=None),
+ sambaNtAclSupport=dict(type='bool',
+ aliases=['samba_nt_acl_support'],
+ default=True),
+ sambaOplocks=dict(type='bool',
+ aliases=['samba_oplocks'],
+ default=True),
+ sambaPostexec=dict(type='str',
+ aliases=['samba_postexec'],
+ default=None),
+ sambaPreexec=dict(type='str',
+ aliases=['samba_preexec'],
+ default=None),
+ sambaPublic=dict(type='bool',
+ aliases=['samba_public'],
+ default=False),
+ sambaSecurityMode=dict(type='str',
+ aliases=['samba_security_mode'],
+ default='0777'),
+ sambaStrictLocking=dict(type='str',
+ aliases=['samba_strict_locking'],
+ default='Auto'),
+ sambaVFSObjects=dict(type='str',
+ aliases=['samba_vfs_objects'],
+ default=None),
+ sambaValidUsers=dict(type='str',
+ aliases=['samba_valid_users'],
+ default=None),
+ sambaWriteList=dict(type='str',
+ aliases=['samba_write_list'],
+ default=None),
+ sambaWriteable=dict(type='bool',
+ aliases=['samba_writeable'],
+ default=True),
+ nfs_hosts=dict(type='list',
+ default=[]),
+ nfsCustomSettings=dict(type='list',
+ aliases=['nfs_custom_settings'],
+ default=[]),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
),
supports_check_mode=True,
- required_if = ([
+ required_if=([
('state', 'present', ['path', 'host', 'sambaName'])
])
)
- name = module.params['name']
- state = module.params['state']
+ name = module.params['name']
+ state = module.params['state']
changed = False
obj = list(ldap_search(
diff --git a/lib/ansible/modules/cloud/univention/udm_user.py b/lib/ansible/modules/cloud/univention/udm_user.py
index 1d021611bb..b0f6ee0a60 100644
--- a/lib/ansible/modules/cloud/univention/udm_user.py
+++ b/lib/ansible/modules/cloud/univention/udm_user.py
@@ -348,141 +348,141 @@ from ansible.module_utils.univention_umc import (
def main():
expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
module = AnsibleModule(
- argument_spec = dict(
- birthday = dict(default=None,
- type='str'),
- city = dict(default=None,
- type='str'),
- country = dict(default=None,
- type='str'),
- department_number = dict(default=None,
- type='str',
- aliases=['departmentNumber']),
- description = dict(default=None,
- type='str'),
- display_name = dict(default=None,
- type='str',
- aliases=['displayName']),
- email = dict(default=[''],
- type='list'),
- employee_number = dict(default=None,
- type='str',
- aliases=['employeeNumber']),
- employee_type = dict(default=None,
- type='str',
- aliases=['employeeType']),
- firstname = dict(default=None,
- type='str'),
- gecos = dict(default=None,
- type='str'),
- groups = dict(default=[],
- type='list'),
- home_share = dict(default=None,
- type='str',
- aliases=['homeShare']),
- home_share_path = dict(default=None,
- type='str',
- aliases=['homeSharePath']),
- home_telephone_number = dict(default=[],
- type='list',
- aliases=['homeTelephoneNumber']),
- homedrive = dict(default=None,
- type='str'),
- lastname = dict(default=None,
- type='str'),
- mail_alternative_address= dict(default=[],
- type='list',
- aliases=['mailAlternativeAddress']),
- mail_home_server = dict(default=None,
- type='str',
- aliases=['mailHomeServer']),
- mail_primary_address = dict(default=None,
- type='str',
- aliases=['mailPrimaryAddress']),
- mobile_telephone_number = dict(default=[],
- type='list',
- aliases=['mobileTelephoneNumber']),
- organisation = dict(default=None,
- type='str'),
- overridePWHistory = dict(default=False,
- type='bool',
- aliases=['override_pw_history']),
- overridePWLength = dict(default=False,
- type='bool',
- aliases=['override_pw_length']),
- pager_telephonenumber = dict(default=[],
- type='list',
- aliases=['pagerTelephonenumber']),
- password = dict(default=None,
- type='str',
- no_log=True),
- phone = dict(default=[],
- type='list'),
- postcode = dict(default=None,
- type='str'),
- primary_group = dict(default=None,
- type='str',
- aliases=['primaryGroup']),
- profilepath = dict(default=None,
- type='str'),
- pwd_change_next_login = dict(default=None,
- type='str',
- choices=['0', '1'],
- aliases=['pwdChangeNextLogin']),
- room_number = dict(default=None,
- type='str',
- aliases=['roomNumber']),
- samba_privileges = dict(default=[],
- type='list',
- aliases=['sambaPrivileges']),
- samba_user_workstations = dict(default=[],
- type='list',
- aliases=['sambaUserWorkstations']),
- sambahome = dict(default=None,
- type='str'),
- scriptpath = dict(default=None,
- type='str'),
- secretary = dict(default=[],
- type='list'),
- serviceprovider = dict(default=[''],
- type='list'),
- shell = dict(default='/bin/bash',
- type='str'),
- street = dict(default=None,
- type='str'),
- title = dict(default=None,
- type='str'),
- unixhome = dict(default=None,
- type='str'),
- userexpiry = dict(default=expiry,
- type='str'),
- username = dict(required=True,
- aliases=['name'],
- type='str'),
- position = dict(default='',
- type='str'),
- update_password = dict(default='always',
- choices=['always', 'on_create'],
- type='str'),
- ou = dict(default='',
- type='str'),
- subpath = dict(default='cn=users',
- type='str'),
- state = dict(default='present',
- choices=['present', 'absent'],
- type='str')
+ argument_spec=dict(
+ birthday=dict(default=None,
+ type='str'),
+ city=dict(default=None,
+ type='str'),
+ country=dict(default=None,
+ type='str'),
+ department_number=dict(default=None,
+ type='str',
+ aliases=['departmentNumber']),
+ description=dict(default=None,
+ type='str'),
+ display_name=dict(default=None,
+ type='str',
+ aliases=['displayName']),
+ email=dict(default=[''],
+ type='list'),
+ employee_number=dict(default=None,
+ type='str',
+ aliases=['employeeNumber']),
+ employee_type=dict(default=None,
+ type='str',
+ aliases=['employeeType']),
+ firstname=dict(default=None,
+ type='str'),
+ gecos=dict(default=None,
+ type='str'),
+ groups=dict(default=[],
+ type='list'),
+ home_share=dict(default=None,
+ type='str',
+ aliases=['homeShare']),
+ home_share_path=dict(default=None,
+ type='str',
+ aliases=['homeSharePath']),
+ home_telephone_number=dict(default=[],
+ type='list',
+ aliases=['homeTelephoneNumber']),
+ homedrive=dict(default=None,
+ type='str'),
+ lastname=dict(default=None,
+ type='str'),
+ mail_alternative_address=dict(default=[],
+ type='list',
+ aliases=['mailAlternativeAddress']),
+ mail_home_server=dict(default=None,
+ type='str',
+ aliases=['mailHomeServer']),
+ mail_primary_address=dict(default=None,
+ type='str',
+ aliases=['mailPrimaryAddress']),
+ mobile_telephone_number=dict(default=[],
+ type='list',
+ aliases=['mobileTelephoneNumber']),
+ organisation=dict(default=None,
+ type='str'),
+ overridePWHistory=dict(default=False,
+ type='bool',
+ aliases=['override_pw_history']),
+ overridePWLength=dict(default=False,
+ type='bool',
+ aliases=['override_pw_length']),
+ pager_telephonenumber=dict(default=[],
+ type='list',
+ aliases=['pagerTelephonenumber']),
+ password=dict(default=None,
+ type='str',
+ no_log=True),
+ phone=dict(default=[],
+ type='list'),
+ postcode=dict(default=None,
+ type='str'),
+ primary_group=dict(default=None,
+ type='str',
+ aliases=['primaryGroup']),
+ profilepath=dict(default=None,
+ type='str'),
+ pwd_change_next_login=dict(default=None,
+ type='str',
+ choices=['0', '1'],
+ aliases=['pwdChangeNextLogin']),
+ room_number=dict(default=None,
+ type='str',
+ aliases=['roomNumber']),
+ samba_privileges=dict(default=[],
+ type='list',
+ aliases=['sambaPrivileges']),
+ samba_user_workstations=dict(default=[],
+ type='list',
+ aliases=['sambaUserWorkstations']),
+ sambahome=dict(default=None,
+ type='str'),
+ scriptpath=dict(default=None,
+ type='str'),
+ secretary=dict(default=[],
+ type='list'),
+ serviceprovider=dict(default=[''],
+ type='list'),
+ shell=dict(default='/bin/bash',
+ type='str'),
+ street=dict(default=None,
+ type='str'),
+ title=dict(default=None,
+ type='str'),
+ unixhome=dict(default=None,
+ type='str'),
+ userexpiry=dict(default=expiry,
+ type='str'),
+ username=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ update_password=dict(default='always',
+ choices=['always', 'on_create'],
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=users',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
),
supports_check_mode=True,
- required_if = ([
+ required_if=([
('state', 'present', ['firstname', 'lastname', 'password'])
])
)
- username = module.params['username']
- position = module.params['position']
- ou = module.params['ou']
- subpath = module.params['subpath']
- state = module.params['state']
- changed = False
+ username = module.params['username']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
users = list(ldap_search(
'(&(objectClass=posixAccount)(uid={}))'.format(username),
@@ -532,7 +532,7 @@ def main():
old_password = obj['password'].split('}', 2)[1]
if crypt.crypt(password, old_password) != old_password:
obj['overridePWHistory'] = module.params['overridePWHistory']
- obj['overridePWLength'] = module.params['overridePWLength']
+ obj['overridePWLength'] = module.params['overridePWLength']
obj['password'] = password
diff = obj.diff()
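
The two Univention hunks above are mechanical PEP8 fixes: keyword arguments inside argument_spec drop the spaces around '=' (E251) and the column-aligned 'name = module.params[...]' assignments collapse to single spaces (E221). A minimal stand-alone sketch of the resulting style, using a plain dict and hypothetical build_spec()/check_required() helpers instead of a real AnsibleModule so it runs without Ansible installed:

    # Illustrative sketch only -- build_spec/check_required are hypothetical
    # helpers, not part of udm_share or udm_user.
    def build_spec():
        return dict(
            name=dict(required=True, type='str'),
            sambaName=dict(type='str', aliases=['samba_name'], default=None),
            state=dict(default='present', choices=['present', 'absent'], type='str'),
        )


    def check_required(spec, params):
        # Fail on missing required options, roughly what AnsibleModule does via fail_json().
        missing = [k for k, v in spec.items() if v.get('required') and k not in params]
        if missing:
            raise ValueError('missing required arguments: %s' % ', '.join(missing))
        return dict((k, params.get(k, v.get('default'))) for k, v in spec.items())


    if __name__ == '__main__':
        print(check_required(build_spec(), {'name': 'share0'}))
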
diff --git a/lib/ansible/modules/cloud/vmware/vmware_dns_config.py b/lib/ansible/modules/cloud/vmware/vmware_dns_config.py
index 0f4de41bb5..a60ac4d691 100644
--- a/lib/ansible/modules/cloud/vmware/vmware_dns_config.py
+++ b/lib/ansible/modules/cloud/vmware/vmware_dns_config.py
@@ -94,8 +94,8 @@ def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'),
- domainname=dict(required=True, type='str'),
- dns_servers=dict(required=True, type='list')))
+ domainname=dict(required=True, type='str'),
+ dns_servers=dict(required=True, type='list')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
diff --git a/lib/ansible/modules/cloud/vmware/vmware_dvs_host.py b/lib/ansible/modules/cloud/vmware/vmware_dvs_host.py
index 54b3aedf48..e90d2ff2b6 100644
--- a/lib/ansible/modules/cloud/vmware/vmware_dvs_host.py
+++ b/lib/ansible/modules/cloud/vmware/vmware_dvs_host.py
@@ -234,9 +234,9 @@ def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
- switch_name=dict(required=True, type='str'),
- vmnics=dict(required=True, type='list'),
- state=dict(default='present', choices=['present', 'absent'], type='str')))
+ switch_name=dict(required=True, type='str'),
+ vmnics=dict(required=True, type='list'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
diff --git a/lib/ansible/modules/cloud/vmware/vmware_guest.py b/lib/ansible/modules/cloud/vmware/vmware_guest.py
index 66167ef6e7..185ac9b1fd 100644
--- a/lib/ansible/modules/cloud/vmware/vmware_guest.py
+++ b/lib/ansible/modules/cloud/vmware/vmware_guest.py
@@ -507,6 +507,7 @@ class PyVmomiDeviceHelper(object):
class PyVmomiCache(object):
""" This class caches references to objects which are requested multiples times but not modified """
+
def __init__(self, content, dc_name=None):
self.content = content
self.dc_name = dc_name
@@ -849,7 +850,7 @@ class PyVmomiHelper(PyVmomi):
if (nic.device.backing and not hasattr(nic.device.backing, 'port')):
nic_change_detected = True
elif (nic.device.backing and (nic.device.backing.port.portgroupKey != pg_obj.key or
- nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid)):
+ nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid)):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_guest_find.py b/lib/ansible/modules/cloud/vmware/vmware_guest_find.py
index ad32c8e2a2..f4651fa48d 100644
--- a/lib/ansible/modules/cloud/vmware/vmware_guest_find.py
+++ b/lib/ansible/modules/cloud/vmware/vmware_guest_find.py
@@ -156,7 +156,6 @@ class PyVmomiHelper(object):
return tree
def _build_folder_map(self, folder, inpath='/'):
-
""" Build a searchable index for vms+uuids+folders """
if isinstance(folder, tuple):
folder = folder[1]
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vmkernel.py b/lib/ansible/modules/cloud/vmware/vmware_vmkernel.py
index b16d397990..167df25fd6 100644
--- a/lib/ansible/modules/cloud/vmware/vmware_vmkernel.py
+++ b/lib/ansible/modules/cloud/vmware/vmware_vmkernel.py
@@ -154,15 +154,15 @@ def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
- ip_address=dict(required=True, type='str'),
- subnet_mask=dict(required=True, type='str'),
- mtu=dict(required=False, type='int'),
- enable_vsan=dict(required=False, type='bool'),
- enable_vmotion=dict(required=False, type='bool'),
- enable_mgmt=dict(required=False, type='bool'),
- enable_ft=dict(required=False, type='bool'),
- vswitch_name=dict(required=True, type='str'),
- vlan_id=dict(required=True, type='int')))
+ ip_address=dict(required=True, type='str'),
+ subnet_mask=dict(required=True, type='str'),
+ mtu=dict(required=False, type='int'),
+ enable_vsan=dict(required=False, type='bool'),
+ enable_vmotion=dict(required=False, type='bool'),
+ enable_mgmt=dict(required=False, type='bool'),
+ enable_ft=dict(required=False, type='bool'),
+ vswitch_name=dict(required=True, type='str'),
+ vlan_id=dict(required=True, type='int')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py b/lib/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py
index 215d4d974f..82348fd0fd 100644
--- a/lib/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py
+++ b/lib/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py
@@ -87,8 +87,8 @@ def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
- ip_address=dict(required=True, type='str'),
- subnet_mask=dict(required=True, type='str')))
+ ip_address=dict(required=True, type='str'),
+ subnet_mask=dict(required=True, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
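
The vmware hunks above only re-indent the continuation lines of argument_spec.update(dict(...)) so they line up with the opening parenthesis (E128). The pattern itself, sketched with a stub in place of vmware_argument_spec() so it runs without the vmware module_utils (the stub's contents are an assumption, not the real base spec):

    # Illustrative sketch only -- vmware_argument_spec_stub() stands in for the
    # shared helper; its keys are assumed for demonstration.
    def vmware_argument_spec_stub():
        return dict(hostname=dict(type='str'),
                    username=dict(type='str'),
                    password=dict(type='str', no_log=True))


    argument_spec = vmware_argument_spec_stub()
    argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
                              ip_address=dict(required=True, type='str'),
                              subnet_mask=dict(required=True, type='str')))

    print(sorted(argument_spec))
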
diff --git a/lib/ansible/modules/cloud/vmware/vsphere_guest.py b/lib/ansible/modules/cloud/vmware/vsphere_guest.py
index 049ea178c7..d7cb384a2a 100644
--- a/lib/ansible/modules/cloud/vmware/vsphere_guest.py
+++ b/lib/ansible/modules/cloud/vmware/vsphere_guest.py
@@ -627,6 +627,7 @@ def spec_singleton(spec, request, vm):
spec = request.new_spec()
return spec
+
def get_cdrom_params(module, s, vm_cdrom):
cdrom_type = None
cdrom_iso_path = None
@@ -648,6 +649,7 @@ def get_cdrom_params(module, s, vm_cdrom):
return cdrom_type, cdrom_iso_path
+
def vmdisk_id(vm, current_datastore_name):
id_list = []
for vm_disk in vm._disks:
@@ -666,9 +668,9 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
# Datacenter managed object reference
dclist = [k for k,
- v in vsphere_client.get_datacenters().items() if v == datacenter]
+ v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
- dcmor=dclist[0]
+ dcmor = dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
@@ -744,7 +746,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
cloneArgs = dict(resourcepool=rpmor, power_on=False)
if snapshot_to_clone is not None:
- #check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
+ # check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
cloneArgs["linked"] = True
cloneArgs["snapshot"] = snapshot_to_clone
@@ -778,6 +780,8 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
# example from https://github.com/kalazzerx/pysphere/blob/master/examples/pysphere_create_disk_and_add_to_vm.py
# was used.
+
+
def update_disks(vsphere_client, vm, module, vm_disk, changes):
request = VI.ReconfigVM_TaskRequestMsg()
changed = False
@@ -868,7 +872,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
if vm_extra_config:
spec = spec_singleton(spec, request, vm)
extra_config = []
- for k,v in vm_extra_config.items():
+ for k, v in vm_extra_config.items():
ec = spec.new_extraConfig()
ec.set_element_key(str(k))
ec.set_element_value(str(v))
@@ -988,7 +992,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
spec = spec_singleton(spec, request, vm)
# Get a list of the VM's hard drives
- dev_list = [d for d in vm.properties.config.hardware.device if d._type=='VirtualDisk']
+ dev_list = [d for d in vm.properties.config.hardware.device if d._type == 'VirtualDisk']
if len(vm_disk) > len(dev_list):
vsphere_client.disconnect()
module.fail_json(msg="Error in vm_disk definition. Too many disks defined in comparison to the VM's disk profile.")
@@ -1072,102 +1076,102 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name=None):
- s = vsphere_client
- nics = {}
- request = VI.ReconfigVM_TaskRequestMsg()
- _this = request.new__this(vm._mor)
- _this.set_attribute_type(vm._mor.get_attribute_type())
- request.set_element__this(_this)
- nic_changes = []
- datacenter = esxi['datacenter']
- # Datacenter managed object reference
- dclist = [k for k,
- v in vsphere_client.get_datacenters().items() if v == datacenter]
- if dclist:
- dcmor=dclist[0]
- else:
- vsphere_client.disconnect()
- module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
- dcprops = VIProperty(vsphere_client, dcmor)
- nfmor = dcprops.networkFolder._obj
- for k,v in vm_nic.items():
- nicNum = k[len(k) -1]
- if vm_nic[k]['network_type'] == 'dvs':
- portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network'])
- todvs = True
- elif vm_nic[k]['network_type'] == 'standard':
- todvs = False
- # Detect cards that need to be changed and network type (and act accordingly)
- for dev in vm.properties.config.hardware.device:
- if dev._type in ["VirtualE1000", "VirtualE1000e",
- "VirtualPCNet32", "VirtualVmxnet",
- "VirtualNmxnet2", "VirtualVmxnet3"]:
- devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1]
- if devNum == nicNum:
- fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch'
- if todvs and fromdvs:
- if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey:
- nics[k] = (dev, portgroupKey, 1)
- elif fromdvs and not todvs:
+ s = vsphere_client
+ nics = {}
+ request = VI.ReconfigVM_TaskRequestMsg()
+ _this = request.new__this(vm._mor)
+ _this.set_attribute_type(vm._mor.get_attribute_type())
+ request.set_element__this(_this)
+ nic_changes = []
+ datacenter = esxi['datacenter']
+ # Datacenter managed object reference
+ dclist = [k for k,
+ v in vsphere_client.get_datacenters().items() if v == datacenter]
+ if dclist:
+ dcmor = dclist[0]
+ else:
+ vsphere_client.disconnect()
+ module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
+ dcprops = VIProperty(vsphere_client, dcmor)
+ nfmor = dcprops.networkFolder._obj
+ for k, v in vm_nic.items():
+ nicNum = k[len(k) - 1]
+ if vm_nic[k]['network_type'] == 'dvs':
+ portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network'])
+ todvs = True
+ elif vm_nic[k]['network_type'] == 'standard':
+ todvs = False
+ # Detect cards that need to be changed and network type (and act accordingly)
+ for dev in vm.properties.config.hardware.device:
+ if dev._type in ["VirtualE1000", "VirtualE1000e",
+ "VirtualPCNet32", "VirtualVmxnet",
+ "VirtualNmxnet2", "VirtualVmxnet3"]:
+ devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1]
+ if devNum == nicNum:
+ fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch'
+ if todvs and fromdvs:
+ if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey:
+ nics[k] = (dev, portgroupKey, 1)
+ elif fromdvs and not todvs:
+ nics[k] = (dev, '', 2)
+ elif not fromdvs and todvs:
+ nics[k] = (dev, portgroupKey, 3)
+ elif not fromdvs and not todvs:
+ if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']:
nics[k] = (dev, '', 2)
- elif not fromdvs and todvs:
- nics[k] = (dev, portgroupKey, 3)
- elif not fromdvs and not todvs:
- if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']:
- nics[k] = (dev, '', 2)
- else:
- pass
else:
- module.exit_json()
-
- if len(nics) > 0:
- for nic, obj in nics.items():
- """
- 1,2 and 3 are used to mark which action should be taken
- 1 = from a distributed switch to a distributed switch
- 2 = to a standard switch
- 3 = to a distributed switch
- """
- dev = obj[0]
- pgKey = obj[1]
- dvsKey = obj[2]
- if dvsKey == 1:
- dev.backing.port._obj.set_element_portgroupKey(pgKey)
- dev.backing.port._obj.set_element_portKey('')
- if dvsKey == 3:
- dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey)
- nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
- "nic_backing_port").pyclass()
- nic_backing_port.set_element_switchUuid(dvswitch_uuid)
- nic_backing_port.set_element_portgroupKey(pgKey)
- nic_backing_port.set_element_portKey('')
- nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
- "nic_backing").pyclass()
- nic_backing.set_element_port(nic_backing_port)
- dev._obj.set_element_backing(nic_backing)
- if dvsKey == 2:
- nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
- "nic_backing").pyclass()
- nic_backing.set_element_deviceName(vm_nic[nic]['network'])
- dev._obj.set_element_backing(nic_backing)
- for nic, obj in nics.items():
- dev = obj[0]
- spec = request.new_spec()
- nic_change = spec.new_deviceChange()
- nic_change.set_element_device(dev._obj)
- nic_change.set_element_operation("edit")
- nic_changes.append(nic_change)
- spec.set_element_deviceChange(nic_changes)
- request.set_element_spec(spec)
- ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
- task = VITask(ret, vsphere_client)
- status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
- if status == task.STATE_SUCCESS:
- return(True)
- elif status == task.STATE_ERROR:
- module.fail_json(msg="Could not change network %s" % task.get_error_message())
- elif len(nics) == 0:
- return(False)
+ pass
+ else:
+ module.exit_json()
+
+ if len(nics) > 0:
+ for nic, obj in nics.items():
+ """
+ 1,2 and 3 are used to mark which action should be taken
+ 1 = from a distributed switch to a distributed switch
+ 2 = to a standard switch
+ 3 = to a distributed switch
+ """
+ dev = obj[0]
+ pgKey = obj[1]
+ dvsKey = obj[2]
+ if dvsKey == 1:
+ dev.backing.port._obj.set_element_portgroupKey(pgKey)
+ dev.backing.port._obj.set_element_portKey('')
+ if dvsKey == 3:
+ dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey)
+ nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
+ "nic_backing_port").pyclass()
+ nic_backing_port.set_element_switchUuid(dvswitch_uuid)
+ nic_backing_port.set_element_portgroupKey(pgKey)
+ nic_backing_port.set_element_portKey('')
+ nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
+ "nic_backing").pyclass()
+ nic_backing.set_element_port(nic_backing_port)
+ dev._obj.set_element_backing(nic_backing)
+ if dvsKey == 2:
+ nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
+ "nic_backing").pyclass()
+ nic_backing.set_element_deviceName(vm_nic[nic]['network'])
+ dev._obj.set_element_backing(nic_backing)
+ for nic, obj in nics.items():
+ dev = obj[0]
+ spec = request.new_spec()
+ nic_change = spec.new_deviceChange()
+ nic_change.set_element_device(dev._obj)
+ nic_change.set_element_operation("edit")
+ nic_changes.append(nic_change)
+ spec.set_element_deviceChange(nic_changes)
+ request.set_element_spec(spec)
+ ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
+ task = VITask(ret, vsphere_client)
+ status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
+ if status == task.STATE_SUCCESS:
+ return(True)
+ elif status == task.STATE_ERROR:
+ module.fail_json(msg="Could not change network %s" % task.get_error_message())
+ elif len(nics) == 0:
+ return(False)
def _build_folder_tree(nodes, parent):
@@ -1218,9 +1222,9 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
esxi_hostname = esxi['hostname']
# Datacenter managed object reference
dclist = [k for k,
- v in vsphere_client.get_datacenters().items() if v == datacenter]
+ v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
- dcmor=dclist[0]
+ dcmor = dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
@@ -1419,7 +1423,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
" to be specified." % vm_hardware['vm_floppy'])
# Add a floppy to the VM.
add_floppy(module, vsphere_client, config_target, config, devices,
- default_devs, floppy_type, floppy_image_path)
+ default_devs, floppy_type, floppy_image_path)
if vm_nic:
for nic in sorted(vm_nic):
try:
@@ -1479,7 +1483,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
# Power on the VM if it was requested
power_state(vm, state, True)
- vmfacts=gather_facts(vm)
+ vmfacts = gather_facts(vm)
vsphere_client.disconnect()
module.exit_json(
ansible_facts=vmfacts,
@@ -1579,13 +1583,13 @@ def gather_facts(vm):
'module_hw': True,
'hw_name': vm.properties.name,
'hw_power_status': vm.get_status(),
- 'hw_guest_full_name': vm.properties.config.guestFullName,
+ 'hw_guest_full_name': vm.properties.config.guestFullName,
'hw_guest_id': vm.properties.config.guestId,
'hw_product_uuid': vm.properties.config.uuid,
'hw_instance_uuid': vm.properties.config.instanceUuid,
'hw_processor_count': vm.properties.config.hardware.numCPU,
'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
- 'hw_interfaces':[],
+ 'hw_interfaces': [],
}
netInfo = vm.get_property('net')
netDict = {}
@@ -1608,7 +1612,7 @@ def gather_facts(vm):
'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
- facts['hw_interfaces'].append('eth'+str(ifidx))
+ facts['hw_interfaces'].append('eth' + str(ifidx))
ifidx += 1
@@ -1753,7 +1757,7 @@ def main():
),
supports_check_mode=False,
- mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']],
+ mutually_exclusive=[['state', 'vmware_guest_facts'], ['state', 'from_template']],
required_together=[
['state', 'force'],
[
@@ -1791,7 +1795,6 @@ def main():
power_on_after_clone = module.params['power_on_after_clone']
validate_certs = module.params['validate_certs']
-
# CONNECT TO THE SERVER
viserver = VIServer()
if validate_certs and not hasattr(ssl, 'SSLContext') and not vcenter_hostname.startswith('http://'):
@@ -1896,10 +1899,9 @@ def main():
# check if user is trying to perform state operation on a vm which doesn't exists
elif state in ['present', 'powered_off', 'powered_on'] and not all((vm_extra_config,
- vm_hardware, vm_disk, vm_nic, esxi)):
+ vm_hardware, vm_disk, vm_nic, esxi)):
module.exit_json(changed=False, msg="vm %s not present" % guest)
-
# Create the VM
elif state in ['present', 'powered_off', 'powered_on']:
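
Several vsphere_guest hunks reformat the same datacenter lookup: the list-comprehension continuation line is re-indented and 'dcmor=dclist[0]' gains spaces around '='. The lookup in isolation, with a plain dict standing in for vsphere_client.get_datacenters() (the sample names are made up):

    # Illustrative sketch only -- 'datacenters' replaces the pysphere call.
    datacenters = {'datacenter-21': 'DC1', 'datacenter-42': 'DC2'}
    datacenter = 'DC2'

    dclist = [k for k,
              v in datacenters.items() if v == datacenter]
    if dclist:
        dcmor = dclist[0]
    else:
        raise SystemExit("Cannot find datacenter named: %s" % datacenter)

    print(dcmor)
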
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_app.py b/lib/ansible/modules/cloud/webfaction/webfaction_app.py
index 9f9796160a..d2f6a3a6e7 100644
--- a/lib/ansible/modules/cloud/webfaction/webfaction_app.py
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_app.py
@@ -110,20 +110,20 @@ webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True),
- state = dict(required=False, choices=['present', 'absent'], default='present'),
- type = dict(required=True),
- autostart = dict(required=False, type='bool', default=False),
- extra_info = dict(required=False, default=""),
- port_open = dict(required=False, type='bool', default=False),
- login_name = dict(required=True),
- login_password = dict(required=True, no_log=True),
- machine = dict(required=False, default=False),
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ type=dict(required=True),
+ autostart=dict(required=False, type='bool', default=False),
+ extra_info=dict(required=False, default=""),
+ port_open=dict(required=False, type='bool', default=False),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=False),
),
supports_check_mode=True
)
- app_name = module.params['name']
+ app_name = module.params['name']
app_type = module.params['type']
app_state = module.params['state']
@@ -157,7 +157,7 @@ def main():
# If it exists with the right type, we don't change it
# Should check other parameters.
module.exit_json(
- changed = False,
+ changed=False,
)
if not module.check_mode:
@@ -176,7 +176,7 @@ def main():
# If the app's already not there, nothing changed.
if not existing_app:
module.exit_json(
- changed = False,
+ changed=False,
)
if not module.check_mode:
@@ -188,10 +188,9 @@ def main():
else:
module.fail_json(msg="Unknown state specified: {}".format(app_state))
-
module.exit_json(
- changed = True,
- result = result
+ changed=True,
+ result=result
)
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_db.py b/lib/ansible/modules/cloud/webfaction/webfaction_db.py
index 35476327d4..deb8fd4e87 100644
--- a/lib/ansible/modules/cloud/webfaction/webfaction_db.py
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_db.py
@@ -102,21 +102,21 @@ webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True),
- state = dict(required=False, choices=['present', 'absent'], default='present'),
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
# You can specify an IP address or hostname.
- type = dict(required=True),
- password = dict(required=False, default=None, no_log=True),
- login_name = dict(required=True),
- login_password = dict(required=True, no_log=True),
- machine = dict(required=False, default=False),
+ type=dict(required=True),
+ password=dict(required=False, default=None, no_log=True),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=False),
),
supports_check_mode=True
)
- db_name = module.params['name']
+ db_name = module.params['name']
db_state = module.params['state']
- db_type = module.params['type']
+ db_type = module.params['type']
db_passwd = module.params['password']
if module.params['machine']:
@@ -153,10 +153,9 @@ def main():
# If it exists with the right type, we don't change anything.
module.exit_json(
- changed = False,
+ changed=False,
)
-
if not module.check_mode:
# If this isn't a dry run, create the db
# and default user.
@@ -172,7 +171,7 @@ def main():
if not module.check_mode:
if not (existing_db or existing_user):
- module.exit_json(changed = False,)
+ module.exit_json(changed=False,)
if existing_db:
# Delete the db if it exists
@@ -190,8 +189,8 @@ def main():
module.fail_json(msg="Unknown state specified: {}".format(db_state))
module.exit_json(
- changed = True,
- result = result
+ changed=True,
+ result=result
)
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_domain.py b/lib/ansible/modules/cloud/webfaction/webfaction_domain.py
index 65a7e60beb..2b32de866e 100644
--- a/lib/ansible/modules/cloud/webfaction/webfaction_domain.py
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_domain.py
@@ -93,16 +93,16 @@ webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True),
- state = dict(required=False, choices=['present', 'absent'], default='present'),
- subdomains = dict(required=False, default=[]),
- login_name = dict(required=True),
- login_password = dict(required=True, no_log=True),
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ subdomains=dict(required=False, default=[]),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
),
supports_check_mode=True
)
- domain_name = module.params['name']
+ domain_name = module.params['name']
domain_state = module.params['state']
domain_subdomains = module.params['subdomains']
@@ -127,7 +127,7 @@ def main():
if set(existing_domain['subdomains']) >= set(domain_subdomains):
# If it exists with the right subdomains, we don't change anything.
module.exit_json(
- changed = False,
+ changed=False,
)
positional_args = [session_id, domain_name] + domain_subdomains
@@ -146,7 +146,7 @@ def main():
# If the app's already not there, nothing changed.
if not existing_domain:
module.exit_json(
- changed = False,
+ changed=False,
)
positional_args = [session_id, domain_name] + domain_subdomains
@@ -161,8 +161,8 @@ def main():
module.fail_json(msg="Unknown state specified: {}".format(domain_state))
module.exit_json(
- changed = True,
- result = result
+ changed=True,
+ result=result
)
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_site.py b/lib/ansible/modules/cloud/webfaction/webfaction_site.py
index ab5e583c5a..bee932419f 100644
--- a/lib/ansible/modules/cloud/webfaction/webfaction_site.py
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_site.py
@@ -108,20 +108,20 @@ webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True),
- state = dict(required=False, choices=['present', 'absent'], default='present'),
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
# You can specify an IP address or hostname.
- host = dict(required=True),
- https = dict(required=False, type='bool', default=False),
- subdomains = dict(required=False, type='list', default=[]),
- site_apps = dict(required=False, type='list', default=[]),
- login_name = dict(required=True),
- login_password = dict(required=True, no_log=True),
+ host=dict(required=True),
+ https=dict(required=False, type='bool', default=False),
+ subdomains=dict(required=False, type='list', default=[]),
+ site_apps=dict(required=False, type='list', default=[]),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
),
supports_check_mode=True
)
- site_name = module.params['name']
+ site_name = module.params['name']
site_state = module.params['state']
site_host = module.params['host']
site_ip = socket.gethostbyname(site_host)
@@ -159,7 +159,7 @@ def main():
(set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
(dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
module.exit_json(
- changed = False
+ changed=False
)
positional_args = [
@@ -168,16 +168,16 @@ def main():
module.params['subdomains'],
]
for a in module.params['site_apps']:
- positional_args.append( (a[0], a[1]) )
+ positional_args.append((a[0], a[1]))
if not module.check_mode:
# If this isn't a dry run, create or modify the site
result.update(
webfaction.create_website(
*positional_args
- ) if not existing_site else webfaction.update_website (
- *positional_args
- )
+ ) if not existing_site else webfaction.update_website(
+ *positional_args
+ )
)
elif site_state == 'absent':
@@ -185,7 +185,7 @@ def main():
# If the site's already not there, nothing changed.
if not existing_site:
module.exit_json(
- changed = False,
+ changed=False,
)
if not module.check_mode:
@@ -198,8 +198,8 @@ def main():
module.fail_json(msg="Unknown state specified: {}".format(site_state))
module.exit_json(
- changed = True,
- result = result
+ changed=True,
+ result=result
)
diff --git a/lib/ansible/modules/clustering/consul_acl.py b/lib/ansible/modules/clustering/consul_acl.py
index 6c304381d3..446e80ee27 100644
--- a/lib/ansible/modules/clustering/consul_acl.py
+++ b/lib/ansible/modules/clustering/consul_acl.py
@@ -456,6 +456,7 @@ class Configuration:
"""
Configuration for this module.
"""
+
def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
rules=None, state=None, token=None, token_type=None):
self.management_token = management_token # type: str
@@ -474,6 +475,7 @@ class Output:
"""
Output of an action of this module.
"""
+
def __init__(self, changed=None, token=None, rules=None, operation=None):
self.changed = changed # type: bool
self.token = token # type: str
@@ -485,6 +487,7 @@ class ACL:
"""
Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
"""
+
def __init__(self, rules, token_type, token, name):
self.rules = rules
self.token_type = token_type
@@ -507,6 +510,7 @@ class Rule:
"""
ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
"""
+
def __init__(self, scope, policy, pattern=None):
self.scope = scope
self.policy = policy
@@ -532,6 +536,7 @@ class RuleCollection:
"""
Collection of ACL rules, which are part of a Consul ACL.
"""
+
def __init__(self):
self._rules = {}
for scope in RULE_SCOPES:
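
The consul_acl.py hunks only insert a blank line between each class docstring and its first method (E301). For example, the Output class ends up laid out like this (type-comment annotations omitted, and the final attribute assignment completed from the visible signature):

    # Illustrative sketch of the post-change layout.
    class Output:
        """
        Output of an action of this module.
        """

        def __init__(self, changed=None, token=None, rules=None, operation=None):
            self.changed = changed
            self.token = token
            self.rules = rules
            self.operation = operation


    print(Output(changed=True).changed)
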
diff --git a/lib/ansible/modules/database/misc/kibana_plugin.py b/lib/ansible/modules/database/misc/kibana_plugin.py
index f5bb2c0be8..f56f36a87e 100644
--- a/lib/ansible/modules/database/misc/kibana_plugin.py
+++ b/lib/ansible/modules/database/misc/kibana_plugin.py
@@ -147,9 +147,11 @@ def parse_plugin_repo(string):
return repo
+
def is_plugin_present(plugin_dir, working_dir):
return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
def parse_error(string):
reason = "reason: "
try:
@@ -157,6 +159,7 @@ def parse_error(string):
except ValueError:
return string
+
def install_plugin(module, plugin_bin, plugin_name, url, timeout):
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
@@ -178,6 +181,7 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout):
return True, cmd, out, err
+
def remove_plugin(module, plugin_bin, plugin_name):
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
@@ -193,6 +197,7 @@ def remove_plugin(module, plugin_bin, plugin_name):
return True, cmd, out, err
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -208,14 +213,14 @@ def main():
supports_check_mode=True,
)
- name = module.params["name"]
- state = module.params["state"]
- url = module.params["url"]
- timeout = module.params["timeout"]
- plugin_bin = module.params["plugin_bin"]
- plugin_dir = module.params["plugin_dir"]
- version = module.params["version"]
- force = module.params["force"]
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ version = module.params["version"]
+ force = module.params["force"]
present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
diff --git a/lib/ansible/modules/database/misc/riak.py b/lib/ansible/modules/database/misc/riak.py
index 03b36b2ca5..13be9cab1f 100644
--- a/lib/ansible/modules/database/misc/riak.py
+++ b/lib/ansible/modules/database/misc/riak.py
@@ -102,6 +102,7 @@ def ring_check(module, riak_admin_bin):
else:
return False
+
def main():
module = AnsibleModule(
@@ -115,10 +116,9 @@ def main():
wait_for_ring=dict(default=False, type='int'),
wait_for_service=dict(
required=False, default=None, choices=['kv']),
- validate_certs = dict(default='yes', type='bool'))
+ validate_certs=dict(default='yes', type='bool'))
)
-
command = module.params.get('command')
http_conn = module.params.get('http_conn')
target_node = module.params.get('target_node')
@@ -126,8 +126,7 @@ def main():
wait_for_ring = module.params.get('wait_for_ring')
wait_for_service = module.params.get('wait_for_service')
-
- #make sure riak commands are on the path
+ # make sure riak commands are on the path
riak_bin = module.get_bin_path('riak')
riak_admin_bin = module.get_bin_path('riak-admin')
@@ -150,16 +149,16 @@ def main():
node_name = stats['nodename']
nodes = stats['ring_members']
ring_size = stats['ring_creation_size']
- rc, out, err = module.run_command([riak_bin, 'version'] )
+ rc, out, err = module.run_command([riak_bin, 'version'])
version = out.strip()
result = dict(node_name=node_name,
- nodes=nodes,
- ring_size=ring_size,
- version=version)
+ nodes=nodes,
+ ring_size=ring_size,
+ version=version)
if command == 'ping':
- cmd = '%s ping %s' % ( riak_bin, target_node )
+ cmd = '%s ping %s' % (riak_bin, target_node)
rc, out, err = module.run_command(cmd)
if rc == 0:
result['ping'] = out
@@ -219,7 +218,7 @@ def main():
module.fail_json(msg='Timeout waiting for handoffs.')
if wait_for_service:
- cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ]
+ cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
rc, out, err = module.run_command(cmd)
result['service'] = out
diff --git a/lib/ansible/modules/database/mongodb/mongodb_parameter.py b/lib/ansible/modules/database/mongodb/mongodb_parameter.py
index fdd69ffc9c..28dbf13505 100644
--- a/lib/ansible/modules/database/mongodb/mongodb_parameter.py
+++ b/lib/ansible/modules/database/mongodb/mongodb_parameter.py
@@ -223,5 +223,5 @@ def main():
after=value)
-if __name__ == '__main__':
+if __name__ == '__main__':
main()
diff --git a/lib/ansible/modules/database/mongodb/mongodb_user.py b/lib/ansible/modules/database/mongodb/mongodb_user.py
index 00f2620fce..9e00e967e4 100644
--- a/lib/ansible/modules/database/mongodb/mongodb_user.py
+++ b/lib/ansible/modules/database/mongodb/mongodb_user.py
@@ -270,8 +270,8 @@ def user_find(client, user, db_name):
def user_add(module, client, db_name, user, password, roles):
- #pymongo's user_add is a _create_or_update_user so we won't know if it was changed or updated
- #without reproducing a lot of the logic in database.py of pymongo
+ # pymongo's user_add is a _create_or_update_user so we won't know if it was changed or updated
+ # without reproducing a lot of the logic in database.py of pymongo
db = client[db_name]
if roles is None:
@@ -279,6 +279,7 @@ def user_add(module, client, db_name, user, password, roles):
else:
db.add_user(user, password, None, roles=roles)
+
def user_remove(module, client, db_name, user):
exists = user_find(client, user, db_name)
if exists:
@@ -289,6 +290,7 @@ def user_remove(module, client, db_name, user):
else:
module.exit_json(changed=False, user=user)
+
def load_mongocnf():
config = configparser.RawConfigParser()
mongocnf = os.path.expanduser('~/.mongodb.cnf')
@@ -305,7 +307,6 @@ def load_mongocnf():
return creds
-
def check_if_roles_changed(uinfo, roles, db_name):
# We must be aware of users which can read the oplog on a replicaset
# Such users must have access to the local DB, but since this DB does not store users credentials
@@ -327,7 +328,7 @@ def check_if_roles_changed(uinfo, roles, db_name):
output = list()
for role in roles:
if isinstance(role, (binary_type, text_type)):
- new_role = { "role": role, "db": db_name }
+ new_role = {"role": role, "db": db_name}
output.append(new_role)
else:
output.append(role)
@@ -341,14 +342,13 @@ def check_if_roles_changed(uinfo, roles, db_name):
return True
-
# =========================================
# Module execution.
#
def main():
module = AnsibleModule(
- argument_spec = dict(
+ argument_spec=dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
@@ -417,7 +417,7 @@ def main():
elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
if db_name != "admin":
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
- #else: this has to be the first admin user added
+ # else: this has to be the first admin user added
except Exception as e:
module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
@@ -443,7 +443,7 @@ def main():
# Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
#newuinfo = user_find(client, user, db_name)
- #if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
+ # if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
# module.exit_json(changed=False, user=user)
elif state == 'absent':
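
In mongodb_user.py the substantive-looking line is just a dict-literal spacing fix inside check_if_roles_changed(). The normalisation that loop performs, sketched stand-alone (plain str replaces the module's binary_type/text_type check, and the sample roles are invented):

    # Illustrative sketch only.
    def normalise_roles(roles, db_name):
        output = []
        for role in roles:
            if isinstance(role, str):
                output.append({"role": role, "db": db_name})
            else:
                output.append(role)
        return output


    print(normalise_roles(['readWrite', {'role': 'read', 'db': 'local'}], 'app'))
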
diff --git a/lib/ansible/modules/database/mssql/mssql_db.py b/lib/ansible/modules/database/mssql/mssql_db.py
index 018a68c3a6..f542b44a31 100644
--- a/lib/ansible/modules/database/mssql/mssql_db.py
+++ b/lib/ansible/modules/database/mssql/mssql_db.py
@@ -91,7 +91,7 @@ EXAMPLES = '''
target: /tmp/dump.sql
'''
-RETURN = '''
+RETURN = '''
#
'''
@@ -126,6 +126,7 @@ def db_delete(conn, cursor, db):
cursor.execute("DROP DATABASE [%s]" % db)
return not db_exists(conn, cursor, db)
+
def db_import(conn, cursor, module, db, target):
if os.path.isfile(target):
backup = open(target, 'r')
diff --git a/lib/ansible/modules/database/postgresql/postgresql_ext.py b/lib/ansible/modules/database/postgresql/postgresql_ext.py
index f0e83fb148..97bd549f21 100644
--- a/lib/ansible/modules/database/postgresql/postgresql_ext.py
+++ b/lib/ansible/modules/database/postgresql/postgresql_ext.py
@@ -99,6 +99,7 @@ def ext_exists(cursor, ext):
cursor.execute(query, {'ext': ext})
return cursor.rowcount == 1
+
def ext_delete(cursor, ext):
if ext_exists(cursor, ext):
query = "DROP EXTENSION \"%s\"" % ext
@@ -107,6 +108,7 @@ def ext_delete(cursor, ext):
else:
return False
+
def ext_create(cursor, ext):
if not ext_exists(cursor, ext):
query = 'CREATE EXTENSION "%s"' % ext
@@ -119,6 +121,7 @@ def ext_create(cursor, ext):
# Module execution.
#
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -130,7 +133,7 @@ def main():
ext=dict(required=True, aliases=['name']),
state=dict(default="present", choices=["absent", "present"]),
),
- supports_check_mode = True
+ supports_check_mode=True
)
if not postgresqldb_found:
@@ -145,13 +148,13 @@ def main():
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
- "login_host":"host",
- "login_user":"user",
- "login_password":"password",
- "port":"port"
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port"
}
- kw = dict( (params_map[k], v) for (k, v) in module.params.items()
- if k in params_map and v != '' )
+ kw = dict((params_map[k], v) for (k, v) in module.params.items()
+ if k in params_map and v != '')
try:
db_connection = psycopg2.connect(database=db, **kw)
# Enable autocommit so we can create databases
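
The postgresql_ext.py, postgresql_lang.py and postgresql_schema.py hunks all normalise the same idiom: a space after each colon in params_map (E231) and no padding inside the dict() generator expression (E201/E202). What that filter actually does, runnable without psycopg2 (module_params here is a made-up sample):

    # Illustrative sketch only -- psycopg2.connect(**kw) is intentionally left out.
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port"
    }

    module_params = {"login_host": "db1.example.com", "login_user": "ansible",
                     "login_password": "", "port": "5432", "ext": "hstore"}

    kw = dict((params_map[k], v) for (k, v) in module_params.items()
              if k in params_map and v != '')

    print(kw)  # empty login_password and the unmapped 'ext' key are dropped
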
diff --git a/lib/ansible/modules/database/postgresql/postgresql_lang.py b/lib/ansible/modules/database/postgresql/postgresql_lang.py
index 1db21f4033..8e5afb6a7a 100644
--- a/lib/ansible/modules/database/postgresql/postgresql_lang.py
+++ b/lib/ansible/modules/database/postgresql/postgresql_lang.py
@@ -161,18 +161,21 @@ def lang_exists(cursor, lang):
cursor.execute(query)
return cursor.rowcount > 0
+
def lang_istrusted(cursor, lang):
"""Checks if language is trusted for db"""
query = "SELECT lanpltrusted FROM pg_language WHERE lanname='%s'" % lang
cursor.execute(query)
return cursor.fetchone()[0]
+
def lang_altertrust(cursor, lang, trust):
"""Changes if language is trusted for db"""
query = "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s"
cursor.execute(query, (trust, lang))
return True
+
def lang_add(cursor, lang, trust):
"""Adds language for db"""
if trust:
@@ -182,6 +185,7 @@ def lang_add(cursor, lang, trust):
cursor.execute(query)
return True
+
def lang_drop(cursor, lang, cascade):
"""Drops language for db"""
cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
@@ -197,6 +201,7 @@ def lang_drop(cursor, lang, cascade):
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
return True
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -212,7 +217,7 @@ def main():
cascade=dict(type='bool', default='no'),
fail_on_drop=dict(type='bool', default='yes'),
),
- supports_check_mode = True
+ supports_check_mode=True
)
db = module.params["db"]
@@ -227,14 +232,14 @@ def main():
module.fail_json(msg="the python psycopg2 module is required")
params_map = {
- "login_host":"host",
- "login_user":"user",
- "login_password":"password",
- "port":"port",
- "db":"database"
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "db": "database"
}
- kw = dict( (params_map[k], v) for (k, v) in module.params.items()
- if k in params_map and v != "" )
+ kw = dict((params_map[k], v) for (k, v) in module.params.items()
+ if k in params_map and v != "")
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor()
diff --git a/lib/ansible/modules/database/postgresql/postgresql_schema.py b/lib/ansible/modules/database/postgresql/postgresql_schema.py
index 0f5673a9f9..d8edee6326 100644
--- a/lib/ansible/modules/database/postgresql/postgresql_schema.py
+++ b/lib/ansible/modules/database/postgresql/postgresql_schema.py
@@ -125,6 +125,7 @@ def set_owner(cursor, schema, owner):
cursor.execute(query)
return True
+
def get_schema_info(cursor, schema):
query = """
SELECT schema_owner AS owner
@@ -134,11 +135,13 @@ def get_schema_info(cursor, schema):
cursor.execute(query, {'schema': schema})
return cursor.fetchone()
+
def schema_exists(cursor, schema):
query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = %(schema)s"
cursor.execute(query, {'schema': schema})
return cursor.rowcount == 1
+
def schema_delete(cursor, schema):
if schema_exists(cursor, schema):
query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
@@ -147,6 +150,7 @@ def schema_delete(cursor, schema):
else:
return False
+
def schema_create(cursor, schema, owner):
if not schema_exists(cursor, schema):
query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
@@ -162,6 +166,7 @@ def schema_create(cursor, schema, owner):
else:
return False
+
def schema_matches(cursor, schema, owner):
if not schema_exists(cursor, schema):
return False
@@ -176,6 +181,7 @@ def schema_matches(cursor, schema, owner):
# Module execution.
#
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -189,7 +195,7 @@ def main():
database=dict(default="postgres"),
state=dict(default="present", choices=["absent", "present"]),
),
- supports_check_mode = True
+ supports_check_mode=True
)
if not postgresqldb_found:
@@ -205,13 +211,13 @@ def main():
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
- "login_host":"host",
- "login_user":"user",
- "login_password":"password",
- "port":"port"
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port"
}
- kw = dict( (params_map[k], v) for (k, v) in module.params.items()
- if k in params_map and v != '' )
+ kw = dict((params_map[k], v) for (k, v) in module.params.items()
+ if k in params_map and v != '')
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
diff --git a/lib/ansible/modules/database/vertica/vertica_configuration.py b/lib/ansible/modules/database/vertica/vertica_configuration.py
index 670e068e24..b4734edf52 100644
--- a/lib/ansible/modules/database/vertica/vertica_configuration.py
+++ b/lib/ansible/modules/database/vertica/vertica_configuration.py
@@ -86,11 +86,13 @@ from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
+
class CannotDropError(Exception):
pass
# module specific functions
+
def get_configuration_facts(cursor, parameter_name=''):
facts = {}
cursor.execute("""
@@ -110,12 +112,14 @@ def get_configuration_facts(cursor, parameter_name=''):
'default_value': row.default_value}
return facts
+
def check(configuration_facts, parameter_name, current_value):
parameter_key = parameter_name.lower()
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
return False
return True
+
def present(configuration_facts, cursor, parameter_name, current_value):
parameter_key = parameter_name.lower()
changed = False
@@ -128,6 +132,7 @@ def present(configuration_facts, cursor, parameter_name, current_value):
# module logic
+
def main():
module = AnsibleModule(
@@ -139,7 +144,7 @@ def main():
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
- ), supports_check_mode = True)
+ ), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
@@ -161,8 +166,8 @@ def main():
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
- ).format(module.params['cluster'], module.params['port'], db,
- module.params['login_user'], module.params['login_password'], 'true')
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
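
The vertica hunks re-align the continuation lines of the ODBC DSN handed to pyodbc.connect(). Assembling that DSN in isolation (the leading Driver/Server fields are not visible in the hunk, so they are assumed here, and the connect() call is omitted so the snippet runs without pyodbc):

    # Illustrative sketch only -- the leading DSN fields are an assumption.
    cluster, port, db = 'vertica01.example.com', '5433', 'vdb'
    login_user, login_password = 'dbadmin', 'secret'

    dsn = (
        "Driver=Vertica;"
        "Server={0};"
        "Port={1};"
        "Database={2};"
        "User={3};"
        "Password={4};"
        "ConnectionLoadBalance={5}"
    ).format(cluster, port, db, login_user, login_password, 'true')

    print(dsn)
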
diff --git a/lib/ansible/modules/database/vertica/vertica_facts.py b/lib/ansible/modules/database/vertica/vertica_facts.py
index d3b72b6c7e..c471b9413e 100644
--- a/lib/ansible/modules/database/vertica/vertica_facts.py
+++ b/lib/ansible/modules/database/vertica/vertica_facts.py
@@ -81,6 +81,7 @@ class NotSupportedError(Exception):
# module specific functions
+
def get_schema_facts(cursor, schema=''):
facts = {}
cursor.execute("""
@@ -121,6 +122,7 @@ def get_schema_facts(cursor, schema=''):
facts[schema_key]['usage_roles'].append(row.role_name)
return facts
+
def get_user_facts(cursor, user=''):
facts = {}
cursor.execute("""
@@ -155,6 +157,7 @@ def get_user_facts(cursor, user=''):
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
return facts
+
def get_role_facts(cursor, role=''):
facts = {}
cursor.execute("""
@@ -175,6 +178,7 @@ def get_role_facts(cursor, role=''):
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
return facts
+
def get_configuration_facts(cursor, parameter=''):
facts = {}
cursor.execute("""
@@ -194,6 +198,7 @@ def get_configuration_facts(cursor, parameter=''):
'default_value': row.default_value}
return facts
+
def get_node_facts(cursor, schema=''):
facts = {}
cursor.execute("""
@@ -216,6 +221,7 @@ def get_node_facts(cursor, schema=''):
# module logic
+
def main():
module = AnsibleModule(
@@ -225,7 +231,7 @@ def main():
db=dict(default=None),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
- ), supports_check_mode = True)
+ ), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
@@ -243,8 +249,8 @@ def main():
"User=%s;"
"Password=%s;"
"ConnectionLoadBalance=%s"
- ) % (module.params['cluster'], module.params['port'], db,
- module.params['login_user'], module.params['login_password'], 'true')
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
@@ -257,11 +263,11 @@ def main():
configuration_facts = get_configuration_facts(cursor)
node_facts = get_node_facts(cursor)
module.exit_json(changed=False,
- ansible_facts={'vertica_schemas': schema_facts,
- 'vertica_users': user_facts,
- 'vertica_roles': role_facts,
- 'vertica_configuration': configuration_facts,
- 'vertica_nodes': node_facts})
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
except NotSupportedError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except SystemExit:
diff --git a/lib/ansible/modules/database/vertica/vertica_role.py b/lib/ansible/modules/database/vertica/vertica_role.py
index 33dbb7ad3c..2385b9f44e 100644
--- a/lib/ansible/modules/database/vertica/vertica_role.py
+++ b/lib/ansible/modules/database/vertica/vertica_role.py
@@ -98,11 +98,13 @@ from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
+
class CannotDropError(Exception):
pass
# module specific functions
+
def get_role_facts(cursor, role=''):
facts = {}
cursor.execute("""
@@ -123,6 +125,7 @@ def get_role_facts(cursor, role=''):
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
return facts
+
def update_roles(role_facts, cursor, role,
existing, required):
for assigned_role in set(existing) - set(required):
@@ -130,6 +133,7 @@ def update_roles(role_facts, cursor, role,
for assigned_role in set(required) - set(existing):
cursor.execute("grant {0} to {1}".format(assigned_role, role))
+
def check(role_facts, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
@@ -138,6 +142,7 @@ def check(role_facts, role, assigned_roles):
return False
return True
+
def present(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
@@ -147,19 +152,20 @@ def present(role_facts, cursor, role, assigned_roles):
return True
else:
changed = False
- if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])):
+ if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])):
update_roles(role_facts, cursor, role,
- role_facts[role_key]['assigned_roles'], assigned_roles)
+ role_facts[role_key]['assigned_roles'], assigned_roles)
changed = True
if changed:
role_facts.update(get_role_facts(cursor, role))
return changed
+
def absent(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key in role_facts:
update_roles(role_facts, cursor, role,
- role_facts[role_key]['assigned_roles'], [])
+ role_facts[role_key]['assigned_roles'], [])
cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
del role_facts[role_key]
return True
@@ -168,6 +174,7 @@ def absent(role_facts, cursor, role, assigned_roles):
# module logic
+
def main():
module = AnsibleModule(
@@ -180,7 +187,7 @@ def main():
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
- ), supports_check_mode = True)
+ ), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
@@ -206,8 +213,8 @@ def main():
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
- ).format(module.params['cluster'], module.params['port'], db,
- module.params['login_user'], module.params['login_password'], 'true')
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
diff --git a/lib/ansible/modules/database/vertica/vertica_schema.py b/lib/ansible/modules/database/vertica/vertica_schema.py
index b92b8873de..713435f635 100644
--- a/lib/ansible/modules/database/vertica/vertica_schema.py
+++ b/lib/ansible/modules/database/vertica/vertica_schema.py
@@ -122,11 +122,13 @@ from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
+
class CannotDropError(Exception):
pass
# module specific functions
+
def get_schema_facts(cursor, schema=''):
facts = {}
cursor.execute("""
@@ -167,6 +169,7 @@ def get_schema_facts(cursor, schema=''):
facts[schema_key]['usage_roles'].append(row.role_name)
return facts
+
def update_roles(schema_facts, cursor, schema,
existing, required,
create_existing, create_required):
@@ -180,6 +183,7 @@ def update_roles(schema_facts, cursor, schema,
for role in set(create_required) - set(create_existing):
cursor.execute("grant create on schema {0} to {1}".format(schema, role))
+
def check(schema_facts, schema, usage_roles, create_roles, owner):
schema_key = schema.lower()
if schema_key not in schema_facts:
@@ -192,6 +196,7 @@ def check(schema_facts, schema, usage_roles, create_roles, owner):
return False
return True
+
def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
schema_key = schema.lower()
if schema_key not in schema_facts:
@@ -208,23 +213,24 @@ def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
raise NotSupportedError((
"Changing schema owner is not supported. "
"Current owner: {0}."
- ).format(schema_facts[schema_key]['owner']))
+ ).format(schema_facts[schema_key]['owner']))
if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
update_roles(schema_facts, cursor, schema,
- schema_facts[schema_key]['usage_roles'], usage_roles,
- schema_facts[schema_key]['create_roles'], create_roles)
+ schema_facts[schema_key]['usage_roles'], usage_roles,
+ schema_facts[schema_key]['create_roles'], create_roles)
changed = True
if changed:
schema_facts.update(get_schema_facts(cursor, schema))
return changed
+
def absent(schema_facts, cursor, schema, usage_roles, create_roles):
schema_key = schema.lower()
if schema_key in schema_facts:
update_roles(schema_facts, cursor, schema,
- schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
+ schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
try:
cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
except pyodbc.Error:
@@ -236,6 +242,7 @@ def absent(schema_facts, cursor, schema, usage_roles, create_roles):
# module logic
+
def main():
module = AnsibleModule(
@@ -250,7 +257,7 @@ def main():
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
- ), supports_check_mode = True)
+ ), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
@@ -281,8 +288,8 @@ def main():
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
- ).format(module.params['cluster'], module.params['port'], db,
- module.params['login_user'], module.params['login_password'], 'true')
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
diff --git a/lib/ansible/modules/database/vertica/vertica_user.py b/lib/ansible/modules/database/vertica/vertica_user.py
index 90cf624caf..d15f1a8e59 100644
--- a/lib/ansible/modules/database/vertica/vertica_user.py
+++ b/lib/ansible/modules/database/vertica/vertica_user.py
@@ -134,11 +134,13 @@ from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
+
class CannotDropError(Exception):
pass
# module specific functions
+
def get_user_facts(cursor, user=''):
facts = {}
cursor.execute("""
@@ -173,6 +175,7 @@ def get_user_facts(cursor, user=''):
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
return facts
+
def update_roles(user_facts, cursor, user,
existing_all, existing_default, required):
del_roles = list(set(existing_all) - set(required))
@@ -184,6 +187,7 @@ def update_roles(user_facts, cursor, user,
if required:
cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
+
def check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
@@ -198,13 +202,14 @@ def check(user_facts, user, profile, resource_pool,
if password and password != user_facts[user_key]['password']:
return False
if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or
- ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
+ ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
return False
- if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or \
- sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
return False
return True
+
def present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
@@ -267,20 +272,21 @@ def present(user_facts, cursor, user, profile, resource_pool,
changed = True
if changed:
cursor.execute(' '.join(query_fragments))
- if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or \
- sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
update_roles(user_facts, cursor, user,
- user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
changed = True
if changed:
user_facts.update(get_user_facts(cursor, user))
return changed
+
def absent(user_facts, cursor, user, roles):
user_key = user.lower()
if user_key in user_facts:
update_roles(user_facts, cursor, user,
- user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
try:
cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
except pyodbc.Error:
@@ -292,6 +298,7 @@ def absent(user_facts, cursor, user, roles):
# module logic
+
def main():
module = AnsibleModule(
@@ -309,7 +316,7 @@ def main():
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
- ), supports_check_mode = True)
+ ), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
@@ -348,8 +355,8 @@ def main():
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
- ).format(module.params['cluster'], module.params['port'], db,
- module.params['login_user'], module.params['login_password'], 'true')
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
@@ -359,7 +366,7 @@ def main():
user_facts = get_user_facts(cursor)
if module.check_mode:
changed = not check(user_facts, user, profile, resource_pool,
- locked, password, expired, ldap, roles)
+ locked, password, expired, ldap, roles)
elif state == 'absent':
try:
changed = absent(user_facts, cursor, user, roles)
@@ -368,7 +375,7 @@ def main():
elif state in ['present', 'locked']:
try:
changed = present(user_facts, cursor, user, profile, resource_pool,
- locked, password, expired, ldap, roles)
+ locked, password, expired, ldap, roles)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
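The vertica_* hunks above repeatedly make two cosmetic changes: an added blank line so that top-level definitions end up separated by two blank lines, and keyword arguments written without spaces around `=` (for example `supports_check_mode=True`). A minimal standalone sketch of that style follows; the DSN-building function and its names are invented for illustration and are not taken from the modules.

# Minimal illustration of the spacing style the hunks above converge on.
# The names here are invented for the example; they are not from the modules.


def build_dsn(cluster, port='5433', login_user='dbadmin', login_password=None):
    # Keyword defaults use no spaces around '=', matching the edited modules.
    return (
        "DRIVER=Vertica;"
        "Server={0};"
        "Port={1};"
        "User={2};"
        "Password={3}"
    ).format(cluster, port, login_user, login_password or '')


def main():
    # Two blank lines separate top-level definitions.
    print(build_dsn('db.example.com', port='5433', login_user='dbadmin'))


if __name__ == '__main__':
    main()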
diff --git a/lib/ansible/modules/monitoring/bigpanda.py b/lib/ansible/modules/monitoring/bigpanda.py
index 44108608ee..ddd0776b9c 100644
--- a/lib/ansible/modules/monitoring/bigpanda.py
+++ b/lib/ansible/modules/monitoring/bigpanda.py
@@ -185,7 +185,7 @@ def main():
# Send the data to bigpanda
data = json.dumps(body)
- headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'}
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
try:
response, info = fetch_url(module, request_url, data=data, headers=headers)
if info['status'] == 200:
diff --git a/lib/ansible/modules/monitoring/datadog_event.py b/lib/ansible/modules/monitoring/datadog_event.py
index 8ac6d96e35..5036a647a9 100644
--- a/lib/ansible/modules/monitoring/datadog_event.py
+++ b/lib/ansible/modules/monitoring/datadog_event.py
@@ -135,7 +135,7 @@ def main():
choices=['error', 'warning', 'info', 'success']
),
aggregation_key=dict(required=False, default=None),
- validate_certs = dict(default='yes', type='bool'),
+ validate_certs=dict(default='yes', type='bool'),
)
)
diff --git a/lib/ansible/modules/monitoring/icinga2_feature.py b/lib/ansible/modules/monitoring/icinga2_feature.py
index f4e88ec19d..c85af1b108 100644
--- a/lib/ansible/modules/monitoring/icinga2_feature.py
+++ b/lib/ansible/modules/monitoring/icinga2_feature.py
@@ -103,7 +103,7 @@ class Icinga2FeatureHelper:
change_applied = True
# RC is not 0 for this already disabled feature, handle it as no change applied
elif re.search("Cannot disable feature '%s'. Target file .* does not exist"
- % self.module.params["name"]):
+ % self.module.params["name"]):
change_applied = False
else:
self.module.fail_json(msg="Fail to disable feature. Command returns %s" % out)
diff --git a/lib/ansible/modules/monitoring/librato_annotation.py b/lib/ansible/modules/monitoring/librato_annotation.py
index 416dc8f6a1..1f1969a12c 100644
--- a/lib/ansible/modules/monitoring/librato_annotation.py
+++ b/lib/ansible/modules/monitoring/librato_annotation.py
@@ -135,21 +135,22 @@ def post_annotation(module):
response = response.read()
module.exit_json(changed=True, annotation=response)
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- user = dict(required=True),
- api_key = dict(required=True),
- name = dict(required=False),
- title = dict(required=True),
- source = dict(required=False),
- description = dict(required=False),
- start_time = dict(required=False, default=None, type='int'),
- end_time = dict(require=False, default=None, type='int'),
- links = dict(type='list')
- )
+ argument_spec=dict(
+ user=dict(required=True),
+ api_key=dict(required=True),
+ name=dict(required=False),
+ title=dict(required=True),
+ source=dict(required=False),
+ description=dict(required=False),
+ start_time=dict(required=False, default=None, type='int'),
+ end_time=dict(require=False, default=None, type='int'),
+ links=dict(type='list')
)
+ )
post_annotation(module)
diff --git a/lib/ansible/modules/monitoring/logentries.py b/lib/ansible/modules/monitoring/logentries.py
index bd8f87829f..625c3065fd 100644
--- a/lib/ansible/modules/monitoring/logentries.py
+++ b/lib/ansible/modules/monitoring/logentries.py
@@ -85,9 +85,9 @@ def follow_log(module, le_path, logs, name=None, logtype=None):
cmd = [le_path, 'follow', log]
if name:
- cmd.extend(['--name',name])
+ cmd.extend(['--name', name])
if logtype:
- cmd.extend(['--type',logtype])
+ cmd.extend(['--type', logtype])
rc, out, err = module.run_command(' '.join(cmd))
if not query_log_status(module, le_path, log):
@@ -100,6 +100,7 @@ def follow_log(module, le_path, logs, name=None, logtype=None):
module.exit_json(changed=False, msg="logs(s) already followed")
+
def unfollow_log(module, le_path, logs):
""" Unfollows one or more logs if followed. """
@@ -125,13 +126,14 @@ def unfollow_log(module, le_path, logs):
module.exit_json(changed=False, msg="logs(s) already unfollowed")
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- path = dict(required=True),
- state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
- name = dict(required=False, default=None, type='str'),
- logtype = dict(required=False, default=None, type='str', aliases=['type'])
+ argument_spec=dict(
+ path=dict(required=True),
+ state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
+ name=dict(required=False, default=None, type='str'),
+ logtype=dict(required=False, default=None, type='str', aliases=['type'])
),
supports_check_mode=True
)
diff --git a/lib/ansible/modules/monitoring/logicmonitor.py b/lib/ansible/modules/monitoring/logicmonitor.py
index d6885b4baf..21cc2c32a2 100644
--- a/lib/ansible/modules/monitoring/logicmonitor.py
+++ b/lib/ansible/modules/monitoring/logicmonitor.py
@@ -549,7 +549,6 @@ except ImportError:
HAS_LIB_JSON = False
-
class LogicMonitor(object):
def __init__(self, module, **params):
@@ -667,7 +666,7 @@ class LogicMonitor(object):
for host in hosts:
if (host["hostName"] == hostname and
- host["agentId"] == collector["id"]):
+ host["agentId"] == collector["id"]):
self.module.debug("Host match found")
return host
@@ -688,7 +687,7 @@ class LogicMonitor(object):
self.module.debug("Looking for displayname " + displayname)
self.module.debug("Making RPC call to 'getHost'")
host_json = (json.loads(self.rpc("getHost",
- {"displayName": displayname})))
+ {"displayName": displayname})))
if host_json["status"] == 200:
self.module.debug("RPC call succeeded")
@@ -1028,18 +1027,18 @@ class Collector(LogicMonitor):
else:
self.fail(msg="Error: Unable to retrieve timezone offset")
- offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+ offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60)
h = {"agentId": self.id,
"type": 1,
"notifyCC": True,
"year": offsetstart.year,
- "month": offsetstart.month-1,
+ "month": offsetstart.month - 1,
"day": offsetstart.day,
"hour": offsetstart.hour,
"minute": offsetstart.minute,
"endYear": offsetend.year,
- "endMonth": offsetend.month-1,
+ "endMonth": offsetend.month - 1,
"endDay": offsetend.day,
"endHour": offsetend.hour,
"endMinute": offsetend.minute}
@@ -1187,7 +1186,7 @@ class Host(LogicMonitor):
# Used the host information to grab the collector description
# if not provided
if (not hasattr(self.params, "collector") and
- "agentDescription" in info):
+ "agentDescription" in info):
self.module.debug("Setting collector from host response. " +
"Collector " + info["agentDescription"])
self.params["collector"] = info["agentDescription"]
@@ -1238,8 +1237,8 @@ class Host(LogicMonitor):
if self.info:
self.module.debug("Making RPC call to 'getHostProperties'")
properties_json = (json.loads(self.rpc("getHostProperties",
- {'hostId': self.info["id"],
- "filterSystemProperties": True})))
+ {'hostId': self.info["id"],
+ "filterSystemProperties": True})))
if properties_json["status"] == 200:
self.module.debug("RPC call succeeded")
@@ -1411,8 +1410,8 @@ class Host(LogicMonitor):
return True
if (self.collector and
- hasattr(self.collector, "id") and
- hostresp["agentId"] != self.collector["id"]):
+ hasattr(self.collector, "id") and
+ hostresp["agentId"] != self.collector["id"]):
return True
self.module.debug("Comparing groups.")
@@ -1469,7 +1468,7 @@ class Host(LogicMonitor):
self.fail(
msg="Error: Unable to retrieve timezone offset")
- offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+ offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60)
h = {"hostId": self.info["id"],
"type": 1,
@@ -1599,7 +1598,7 @@ class Host(LogicMonitor):
hgresp = json.loads(self.rpc("getHostGroup", h))
if (hgresp["status"] == 200 and
- hgresp["data"]["appliesTo"] == ""):
+ hgresp["data"]["appliesTo"] == ""):
g.append(path[-1])
@@ -1632,7 +1631,7 @@ class Host(LogicMonitor):
for prop in propresp:
if prop["name"] not in ignore:
if ("*******" in prop["value"] and
- self._verify_property(prop["name"])):
+ self._verify_property(prop["name"])):
p[prop["name"]] = self.properties[prop["name"]]
else:
p[prop["name"]] = prop["value"]
@@ -1641,7 +1640,7 @@ class Host(LogicMonitor):
# Iterate provided properties and compare to received properties
for prop in self.properties:
if (prop not in p or
- p[prop] != self.properties[prop]):
+ p[prop] != self.properties[prop]):
self.module.debug("Properties mismatch")
return True
self.module.debug("Properties match")
@@ -1703,18 +1702,18 @@ class Datasource(LogicMonitor):
else:
self.fail(msg="Error: Unable to retrieve timezone offset")
- offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+ offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60)
h = {"hostDataSourceId": self.id,
"type": 1,
"notifyCC": True,
"year": offsetstart.year,
- "month": offsetstart.month-1,
+ "month": offsetstart.month - 1,
"day": offsetstart.day,
"hour": offsetstart.hour,
"minute": offsetstart.minute,
"endYear": offsetend.year,
- "endMonth": offsetend.month-1,
+ "endMonth": offsetend.month - 1,
"endDay": offsetend.day,
"endHour": offsetend.hour,
"endMinute": offsetend.minute}
@@ -1905,7 +1904,7 @@ class Hostgroup(LogicMonitor):
if properties is not None and group is not None:
self.module.debug("Comparing simple group properties")
if (group["alertEnable"] != self.alertenable or
- group["description"] != self.description):
+ group["description"] != self.description):
return True
@@ -1915,7 +1914,7 @@ class Hostgroup(LogicMonitor):
for prop in properties:
if prop["name"] not in ignore:
if ("*******" in prop["value"] and
- self._verify_property(prop["name"])):
+ self._verify_property(prop["name"])):
p[prop["name"]] = (
self.properties[prop["name"]])
@@ -1965,17 +1964,17 @@ class Hostgroup(LogicMonitor):
self.fail(
msg="Error: Unable to retrieve timezone offset")
- offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+ offsetend = offsetstart + datetime.timedelta(0, int(duration) * 60)
h = {"hostGroupId": self.info["id"],
"type": 1,
"year": offsetstart.year,
- "month": offsetstart.month-1,
+ "month": offsetstart.month - 1,
"day": offsetstart.day,
"hour": offsetstart.hour,
"minute": offsetstart.minute,
"endYear": offsetend.year,
- "endMonth": offsetend.month-1,
+ "endMonth": offsetend.month - 1,
"endDay": offsetend.day,
"endHour": offsetend.hour,
"endMinute": offsetend.minute}
@@ -2086,8 +2085,8 @@ def selector(module):
elif module.params["target"] == "host":
# Make sure required parameter collector is specified
if ((module.params["action"] == "add" or
- module.params["displayname"] is None) and
- module.params["collector"] is None):
+ module.params["displayname"] is None) and
+ module.params["collector"] is None):
module.fail_json(
msg="Parameter 'collector' required.")
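The logicmonitor hunks mostly adjust continuation-line indentation inside parenthesised multi-line conditions and add spaces around arithmetic operators (`int(duration) * 60`, `offsetstart.month - 1`). A small self-contained sketch of both patterns, with invented names rather than the module's real helpers:

# Self-contained illustration of the continuation-indent and operator-spacing
# patterns applied in the hunks above; names are invented for the example.
import datetime


def window_end(start, duration_minutes):
    # Spaces around '*' as in 'int(duration) * 60'.
    return start + datetime.timedelta(0, int(duration_minutes) * 60)


def host_matches(host, hostname, collector):
    # Multi-line condition kept inside parentheses; the continuation line is
    # indented an extra level rather than aligned under the opening token.
    if (host.get("hostName") == hostname and
            host.get("agentId") == collector.get("id")):
        return True
    return False


if __name__ == '__main__':
    start = datetime.datetime(2017, 1, 1, 12, 0)
    print(window_end(start, "30"))
    print(host_matches({"hostName": "web1", "agentId": 7}, "web1", {"id": 7}))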
diff --git a/lib/ansible/modules/monitoring/logicmonitor_facts.py b/lib/ansible/modules/monitoring/logicmonitor_facts.py
index eda22654d1..0b98cf2705 100644
--- a/lib/ansible/modules/monitoring/logicmonitor_facts.py
+++ b/lib/ansible/modules/monitoring/logicmonitor_facts.py
@@ -242,7 +242,7 @@ class LogicMonitor(object):
for host in hosts:
if (host["hostName"] == hostname and
- host["agentId"] == collector["id"]):
+ host["agentId"] == collector["id"]):
self.module.debug("Host match found")
return host
@@ -263,7 +263,7 @@ class LogicMonitor(object):
self.module.debug("Looking for displayname " + displayname)
self.module.debug("Making RPC call to 'getHost'")
host_json = (json.loads(self.rpc("getHost",
- {"displayName": displayname})))
+ {"displayName": displayname})))
if host_json["status"] == 200:
self.module.debug("RPC call succeeded")
@@ -431,7 +431,7 @@ class Host(LogicMonitor):
# Used the host information to grab the collector description
# if not provided
if (not hasattr(self.params, "collector") and
- "agentDescription" in info):
+ "agentDescription" in info):
self.module.debug("Setting collector from host response. " +
"Collector " + info["agentDescription"])
self.params["collector"] = info["agentDescription"]
@@ -464,8 +464,8 @@ class Host(LogicMonitor):
if self.info:
self.module.debug("Making RPC call to 'getHostProperties'")
properties_json = (json.loads(self.rpc("getHostProperties",
- {'hostId': self.info["id"],
- "filterSystemProperties": True})))
+ {'hostId': self.info["id"],
+ "filterSystemProperties": True})))
if properties_json["status"] == 200:
self.module.debug("RPC call succeeded")
diff --git a/lib/ansible/modules/monitoring/nagios.py b/lib/ansible/modules/monitoring/nagios.py
index b851a8c362..1cf7480418 100644
--- a/lib/ansible/modules/monitoring/nagios.py
+++ b/lib/ansible/modules/monitoring/nagios.py
@@ -231,7 +231,7 @@ def which_cmdfile():
'/etc/icinga/icinga.cfg',
# icinga installed from source (default location)
'/usr/local/icinga/etc/icinga.cfg',
- ]
+ ]
for path in locations:
if os.path.exists(path):
@@ -257,8 +257,7 @@ def main():
'command',
'servicegroup_host_downtime',
'servicegroup_service_downtime',
- ]
-
+ ]
module = AnsibleModule(
argument_spec=dict(
@@ -271,8 +270,8 @@ def main():
cmdfile=dict(default=which_cmdfile()),
services=dict(default=None, aliases=['service']),
command=dict(required=False, default=None),
- )
)
+ )
action = module.params['action']
host = module.params['host']
@@ -595,7 +594,6 @@ class Nagios(object):
dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
self._write_command(dt_del_cmd_str)
-
def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
"""
This command is used to schedule downtime for all hosts in a
@@ -934,7 +932,7 @@ class Nagios(object):
cmd = [
"DISABLE_HOST_SVC_NOTIFICATIONS",
"DISABLE_HOST_NOTIFICATIONS"
- ]
+ ]
nagios_return = True
return_str_list = []
for c in cmd:
@@ -962,7 +960,7 @@ class Nagios(object):
cmd = [
"ENABLE_HOST_SVC_NOTIFICATIONS",
"ENABLE_HOST_NOTIFICATIONS"
- ]
+ ]
nagios_return = True
return_str_list = []
for c in cmd:
@@ -1027,19 +1025,19 @@ class Nagios(object):
minutes=self.minutes)
elif self.action == 'delete_downtime':
- if self.services=='host':
+ if self.services == 'host':
self.delete_host_downtime(self.host)
- elif self.services=='all':
+ elif self.services == 'all':
self.delete_host_downtime(self.host, comment='')
else:
self.delete_host_downtime(self.host, services=self.services)
elif self.action == "servicegroup_host_downtime":
if self.servicegroup:
- self.schedule_servicegroup_host_downtime(servicegroup = self.servicegroup, minutes = self.minutes)
+ self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes)
elif self.action == "servicegroup_service_downtime":
if self.servicegroup:
- self.schedule_servicegroup_svc_downtime(servicegroup = self.servicegroup, minutes = self.minutes)
+ self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes)
# toggle the host AND service alerts
elif self.action == 'silence':
@@ -1077,7 +1075,7 @@ class Nagios(object):
# wtf?
else:
- self.module.fail_json(msg="unknown action specified: '%s'" % \
+ self.module.fail_json(msg="unknown action specified: '%s'" %
self.action)
self.module.exit_json(nagios_commands=self.command_results,
diff --git a/lib/ansible/modules/monitoring/newrelic_deployment.py b/lib/ansible/modules/monitoring/newrelic_deployment.py
index 24528100df..92b0e4e42a 100644
--- a/lib/ansible/modules/monitoring/newrelic_deployment.py
+++ b/lib/ansible/modules/monitoring/newrelic_deployment.py
@@ -86,6 +86,7 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode
# Module execution.
#
+
def main():
module = AnsibleModule(
@@ -99,7 +100,7 @@ def main():
user=dict(required=False),
appname=dict(required=False),
environment=dict(required=False),
- validate_certs = dict(default='yes', type='bool'),
+ validate_certs=dict(default='yes', type='bool'),
),
required_one_of=[['app_name', 'application_id']],
supports_check_mode=True
@@ -117,7 +118,7 @@ def main():
else:
module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
- for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]:
+ for item in ["changelog", "description", "revision", "user", "appname", "environment"]:
if module.params[item]:
params[item] = module.params[item]
diff --git a/lib/ansible/modules/monitoring/pagerduty.py b/lib/ansible/modules/monitoring/pagerduty.py
index 7bf0e6a783..7d9f57af03 100644
--- a/lib/ansible/modules/monitoring/pagerduty.py
+++ b/lib/ansible/modules/monitoring/pagerduty.py
@@ -112,7 +112,7 @@ options:
version_added: 1.5.1
'''
-EXAMPLES='''
+EXAMPLES = '''
# List ongoing maintenance windows using a user/passwd
- pagerduty:
name: companyabc
@@ -171,6 +171,7 @@ import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
+
def auth_header(user, passwd, token):
if token:
return "Token token=%s" % token
@@ -178,6 +179,7 @@ def auth_header(user, passwd, token):
auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
return "Basic %s" % auth
+
def ongoing(module, name, user, passwd, token):
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
headers = {"Authorization": auth_header(user, passwd, token)}
@@ -203,7 +205,7 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
headers = {
'Authorization': auth_header(user, passwd, token),
- 'Content-Type' : 'application/json',
+ 'Content-Type': 'application/json',
}
request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}
@@ -225,11 +227,12 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu
return False, json_out, True
+
def absent(module, name, user, passwd, token, requester_id, service):
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0]
headers = {
'Authorization': auth_header(user, passwd, token),
- 'Content-Type' : 'application/json',
+ 'Content-Type': 'application/json',
}
request_data = {}
@@ -266,7 +269,7 @@ def main():
hours=dict(default='1', required=False),
minutes=dict(default='0', required=False),
desc=dict(default='Created by Ansible', required=False),
- validate_certs = dict(default='yes', type='bool'),
+ validate_certs=dict(default='yes', type='bool'),
)
)
@@ -280,7 +283,7 @@ def main():
minutes = module.params['minutes']
token = module.params['token']
desc = module.params['desc']
- requester_id = module.params['requester_id']
+ requester_id = module.params['requester_id']
if not token and not (user or passwd):
module.fail_json(msg="neither user and passwd nor token specified")
@@ -290,7 +293,7 @@ def main():
module.fail_json(msg="service not specified")
(rc, out, changed) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
if rc == 0:
- changed=True
+ changed = True
if state == "ongoing":
(rc, out, changed) = ongoing(module, name, user, passwd, token)
@@ -301,7 +304,6 @@ def main():
if rc != 0:
module.fail_json(msg="failed", result=out)
-
module.exit_json(msg="success", result=out, changed=changed)
diff --git a/lib/ansible/modules/monitoring/pingdom.py b/lib/ansible/modules/monitoring/pingdom.py
index 68c7dcb137..3fdf696301 100644
--- a/lib/ansible/modules/monitoring/pingdom.py
+++ b/lib/ansible/modules/monitoring/pingdom.py
@@ -98,7 +98,7 @@ def pause(checkid, uid, passwd, key):
check = c.get_check(checkid)
name = check.name
result = check.status
- #if result != "paused": # api output buggy - accept raw exception for now
+ # if result != "paused": # api output buggy - accept raw exception for now
# return (True, name, result)
return (False, name, result)
@@ -110,7 +110,7 @@ def unpause(checkid, uid, passwd, key):
check = c.get_check(checkid)
name = check.name
result = check.status
- #if result != "up": # api output buggy - accept raw exception for now
+ # if result != "up": # api output buggy - accept raw exception for now
# return (True, name, result)
return (False, name, result)
diff --git a/lib/ansible/modules/monitoring/sensu_check.py b/lib/ansible/modules/monitoring/sensu_check.py
index fdc24fb422..afa259f40f 100644
--- a/lib/ansible/modules/monitoring/sensu_check.py
+++ b/lib/ansible/modules/monitoring/sensu_check.py
@@ -281,12 +281,12 @@ def sensu_check(module, path, name, state='present', backup=False):
if module.params['custom']:
# Convert to json
custom_params = module.params['custom']
- overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type','subdue','subdue_begin','subdue_end'])
+ overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
if overwrited_fields:
msg = 'You can\'t overwriting standard module parameters via "custom". You are trying overwrite: {opt}'.format(opt=list(overwrited_fields))
module.fail_json(msg=msg)
- for k,v in custom_params.items():
+ for k, v in custom_params.items():
if k in config['checks'][name]:
if not config['checks'][name][k] == v:
changed = True
@@ -298,7 +298,7 @@ def sensu_check(module, path, name, state='present', backup=False):
simple_opts += custom_params.keys()
# Remove obsolete custom params
- for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type','subdue','subdue_begin','subdue_end']):
+ for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
changed = True
reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
del check[opt]
@@ -345,30 +345,30 @@ def sensu_check(module, path, name, state='present', backup=False):
def main():
- arg_spec = {'name': {'type': 'str', 'required': True},
- 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
- 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
- 'backup': {'type': 'bool', 'default': 'no'},
- 'command': {'type': 'str'},
- 'handlers': {'type': 'list'},
- 'subscribers': {'type': 'list'},
- 'interval': {'type': 'int'},
- 'timeout': {'type': 'int'},
- 'ttl': {'type': 'int'},
- 'handle': {'type': 'bool'},
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ 'command': {'type': 'str'},
+ 'handlers': {'type': 'list'},
+ 'subscribers': {'type': 'list'},
+ 'interval': {'type': 'int'},
+ 'timeout': {'type': 'int'},
+ 'ttl': {'type': 'int'},
+ 'handle': {'type': 'bool'},
'subdue_begin': {'type': 'str'},
- 'subdue_end': {'type': 'str'},
+ 'subdue_end': {'type': 'str'},
'dependencies': {'type': 'list'},
- 'metric': {'type': 'bool', 'default': 'no'},
- 'standalone': {'type': 'bool'},
- 'publish': {'type': 'bool'},
- 'occurrences': {'type': 'int'},
- 'refresh': {'type': 'int'},
- 'aggregate': {'type': 'bool'},
- 'low_flap_threshold': {'type': 'int'},
+ 'metric': {'type': 'bool', 'default': 'no'},
+ 'standalone': {'type': 'bool'},
+ 'publish': {'type': 'bool'},
+ 'occurrences': {'type': 'int'},
+ 'refresh': {'type': 'int'},
+ 'aggregate': {'type': 'bool'},
+ 'low_flap_threshold': {'type': 'int'},
'high_flap_threshold': {'type': 'int'},
- 'custom': {'type': 'dict'},
- 'source': {'type': 'str'},
+ 'custom': {'type': 'dict'},
+ 'source': {'type': 'str'},
}
required_together = [['subdue_begin', 'subdue_end']]
diff --git a/lib/ansible/modules/monitoring/sensu_subscription.py b/lib/ansible/modules/monitoring/sensu_subscription.py
index 3e2ac652ae..8a6808a87b 100644
--- a/lib/ansible/modules/monitoring/sensu_subscription.py
+++ b/lib/ansible/modules/monitoring/sensu_subscription.py
@@ -135,9 +135,9 @@ def sensu_subscription(module, path, name, state='present', backup=False):
def main():
- arg_spec = {'name': {'type': 'str', 'required': True},
- 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
- 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
'backup': {'type': 'bool', 'default': 'no'},
}
diff --git a/lib/ansible/modules/monitoring/stackdriver.py b/lib/ansible/modules/monitoring/stackdriver.py
index e530fea2dd..80e8b5dd71 100644
--- a/lib/ansible/modules/monitoring/stackdriver.py
+++ b/lib/ansible/modules/monitoring/stackdriver.py
@@ -124,6 +124,7 @@ def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_
return do_send_request(module, deploy_api, params, key)
+
def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
"""Send an annotation event to Stackdriver"""
annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
@@ -141,13 +142,14 @@ def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None,
return do_send_request(module, annotation_api, params, key)
+
def do_send_request(module, url, params, key):
data = json.dumps(params)
headers = {
'Content-Type': 'application/json',
'x-stackdriver-apikey': key
}
- response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
+ response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
if info['status'] != 200:
module.fail_json(msg="Unable to send msg: %s" % info['msg'])
diff --git a/lib/ansible/modules/monitoring/uptimerobot.py b/lib/ansible/modules/monitoring/uptimerobot.py
index e0c87b0ceb..22426cfb76 100644
--- a/lib/ansible/modules/monitoring/uptimerobot.py
+++ b/lib/ansible/modules/monitoring/uptimerobot.py
@@ -120,10 +120,10 @@ def pauseMonitor(module, params):
def main():
module = AnsibleModule(
- argument_spec = dict(
- state = dict(required=True, choices=['started', 'paused']),
- apikey = dict(required=True),
- monitorid = dict(required=True)
+ argument_spec=dict(
+ state=dict(required=True, choices=['started', 'paused']),
+ apikey=dict(required=True),
+ monitorid=dict(required=True)
),
supports_check_mode=SUPPORTS_CHECK_MODE
)
diff --git a/lib/ansible/modules/monitoring/zabbix/zabbix_proxy.py b/lib/ansible/modules/monitoring/zabbix/zabbix_proxy.py
index ffd0726fdf..bb2d8ecc89 100644
--- a/lib/ansible/modules/monitoring/zabbix/zabbix_proxy.py
+++ b/lib/ansible/modules/monitoring/zabbix/zabbix_proxy.py
@@ -186,7 +186,7 @@ class Proxy(object):
old_interface = {}
if 'interface' in self.existing_data and \
len(self.existing_data['interface']) > 0:
- old_interface = self.existing_data['interface']
+ old_interface = self.existing_data['interface']
final_interface = old_interface.copy()
final_interface.update(new_interface)
@@ -206,7 +206,7 @@ class Proxy(object):
for item in data:
if data[item] and item in self.existing_data and \
self.existing_data[item] != data[item]:
- parameters[item] = data[item]
+ parameters[item] = data[item]
if 'interface' in parameters:
parameters.pop('interface')
diff --git a/lib/ansible/modules/net_tools/cloudflare_dns.py b/lib/ansible/modules/net_tools/cloudflare_dns.py
index e2ee91c854..d26f01508d 100644
--- a/lib/ansible/modules/net_tools/cloudflare_dns.py
+++ b/lib/ansible/modules/net_tools/cloudflare_dns.py
@@ -278,35 +278,34 @@ from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
-
class CloudflareAPI(object):
cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
changed = False
def __init__(self, module):
- self.module = module
+ self.module = module
self.account_api_token = module.params['account_api_token']
- self.account_email = module.params['account_email']
- self.port = module.params['port']
- self.priority = module.params['priority']
- self.proto = module.params['proto']
- self.proxied = module.params['proxied']
- self.record = module.params['record']
- self.service = module.params['service']
- self.is_solo = module.params['solo']
- self.state = module.params['state']
- self.timeout = module.params['timeout']
- self.ttl = module.params['ttl']
- self.type = module.params['type']
- self.value = module.params['value']
- self.weight = module.params['weight']
- self.zone = module.params['zone']
+ self.account_email = module.params['account_email']
+ self.port = module.params['port']
+ self.priority = module.params['priority']
+ self.proto = module.params['proto']
+ self.proxied = module.params['proxied']
+ self.record = module.params['record']
+ self.service = module.params['service']
+ self.is_solo = module.params['solo']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.ttl = module.params['ttl']
+ self.type = module.params['type']
+ self.value = module.params['value']
+ self.weight = module.params['weight']
+ self.zone = module.params['zone']
if self.record == '@':
self.record = self.zone
- if (self.type in ['CNAME','NS','MX','SRV']) and (self.value is not None):
+ if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
self.value = self.value.rstrip('.')
if (self.type == 'SRV'):
@@ -318,10 +317,10 @@ class CloudflareAPI(object):
if not self.record.endswith(self.zone):
self.record = self.record + '.' + self.zone
- def _cf_simple_api_call(self,api_call,method='GET',payload=None):
- headers = { 'X-Auth-Email': self.account_email,
- 'X-Auth-Key': self.account_api_token,
- 'Content-Type': 'application/json' }
+ def _cf_simple_api_call(self, api_call, method='GET', payload=None):
+ headers = {'X-Auth-Email': self.account_email,
+ 'X-Auth-Key': self.account_api_token,
+ 'Content-Type': 'application/json'}
data = None
if payload:
try:
@@ -336,28 +335,28 @@ class CloudflareAPI(object):
method=method,
timeout=self.timeout)
- if info['status'] not in [200,304,400,401,403,429,405,415]:
- self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}".format(api_call,info['status']))
+ if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
+ self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}".format(api_call, info['status']))
error_msg = ''
if info['status'] == 401:
# Unauthorized
- error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 403:
# Forbidden
- error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 429:
# Too many requests
- error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 405:
# Method not allowed
- error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 415:
# Unsupported Media Type
- error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
- elif info ['status'] == 400:
+ error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 400:
# Bad Request
- error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
result = None
try:
@@ -375,22 +374,22 @@ class CloudflareAPI(object):
error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
# received an error status but no data with details on what failed
- if (info['status'] not in [200,304]) and (result is None):
+ if (info['status'] not in [200, 304]) and (result is None):
self.module.fail_json(msg=error_msg)
if not result['success']:
error_msg += "; Error details: "
for error in result['errors']:
- error_msg += "code: {0}, error: {1}; ".format(error['code'],error['message'])
+ error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
if 'error_chain' in error:
for chain_error in error['error_chain']:
- error_msg += "code: {0}, error: {1}; ".format(chain_error['code'],chain_error['message'])
+ error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
self.module.fail_json(msg=error_msg)
return result, info['status']
- def _cf_api_call(self,api_call,method='GET',payload=None):
- result, status = self._cf_simple_api_call(api_call,method,payload)
+ def _cf_api_call(self, api_call, method='GET', payload=None):
+ result, status = self._cf_simple_api_call(api_call, method, payload)
data = result['result']
@@ -401,19 +400,19 @@ class CloudflareAPI(object):
parameters = ['page={0}'.format(next_page)]
# strip "page" parameter from call parameters (if there are any)
if '?' in api_call:
- raw_api_call,query = api_call.split('?',1)
+ raw_api_call, query = api_call.split('?', 1)
parameters += [param for param in query.split('&') if not param.startswith('page')]
else:
raw_api_call = api_call
while next_page <= pagination['total_pages']:
raw_api_call += '?' + '&'.join(parameters)
- result, status = self._cf_simple_api_call(raw_api_call,method,payload)
+ result, status = self._cf_simple_api_call(raw_api_call, method, payload)
data += result['result']
next_page += 1
return data, status
- def _get_zone_id(self,zone=None):
+ def _get_zone_id(self, zone=None):
if not zone:
zone = self.zone
@@ -426,16 +425,16 @@ class CloudflareAPI(object):
return zones[0]['id']
- def get_zones(self,name=None):
+ def get_zones(self, name=None):
if not name:
name = self.zone
param = ''
if name:
- param = '?' + urlencode({'name' : name})
- zones,status = self._cf_api_call('/zones' + param)
+ param = '?' + urlencode({'name': name})
+ zones, status = self._cf_api_call('/zones' + param)
return zones
- def get_dns_records(self,zone_name=None,type=None,record=None,value=''):
+ def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
if not zone_name:
zone_name = self.zone
if not type:
@@ -459,16 +458,16 @@ class CloudflareAPI(object):
if query:
api_call += '?' + urlencode(query)
- records,status = self._cf_api_call(api_call)
+ records, status = self._cf_api_call(api_call)
return records
- def delete_dns_records(self,**kwargs):
+ def delete_dns_records(self, **kwargs):
params = {}
- for param in ['port','proto','service','solo','type','record','value','weight','zone']:
+ for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone']:
if param in kwargs:
params[param] = kwargs[param]
else:
- params[param] = getattr(self,param)
+ params[param] = getattr(self, param)
records = []
content = params['value']
@@ -481,27 +480,27 @@ class CloudflareAPI(object):
else:
search_value = content
- records = self.get_dns_records(params['zone'],params['type'],search_record,search_value)
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
for rr in records:
if params['solo']:
if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)):
self.changed = True
if not self.module.check_mode:
- result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'],rr['id']),'DELETE')
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
else:
self.changed = True
if not self.module.check_mode:
- result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'],rr['id']),'DELETE')
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
return self.changed
- def ensure_dns_record(self,**kwargs):
+ def ensure_dns_record(self, **kwargs):
params = {}
- for param in ['port','priority','proto','proxied','service','ttl','type','record','value','weight','zone']:
+ for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone']:
if param in kwargs:
params[param] = kwargs[param]
else:
- params[param] = getattr(self,param)
+ params[param] = getattr(self, param)
search_value = params['value']
search_record = params['record']
@@ -509,7 +508,7 @@ class CloudflareAPI(object):
if (params['type'] is None) or (params['record'] is None):
self.module.fail_json(msg="You must provide a type and a record to create a new record")
- if (params['type'] in [ 'A','AAAA','CNAME','TXT','MX','NS','SPF']):
+ if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
if not params['value']:
self.module.fail_json(msg="You must provide a non-empty value to create this record type")
@@ -527,11 +526,11 @@ class CloudflareAPI(object):
"ttl": params['ttl']
}
- if (params['type'] in [ 'A', 'AAAA', 'CNAME' ]):
+ if (params['type'] in ['A', 'AAAA', 'CNAME']):
new_record["proxied"] = params["proxied"]
if params['type'] == 'MX':
- for attr in [params['priority'],params['value']]:
+ for attr in [params['priority'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide priority and a value to create this record type")
new_record = {
@@ -543,7 +542,7 @@ class CloudflareAPI(object):
}
if params['type'] == 'SRV':
- for attr in [params['port'],params['priority'],params['proto'],params['service'],params['weight'],params['value']]:
+ for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
srv_data = {
@@ -555,12 +554,12 @@ class CloudflareAPI(object):
"proto": params['proto'],
"service": params['service']
}
- new_record = { "type": params['type'], "ttl": params['ttl'], 'data': srv_data }
+ new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
zone_id = self._get_zone_id(params['zone'])
- records = self.get_dns_records(params['zone'],params['type'],search_record,search_value)
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
# in theory this should be impossible as cloudflare does not allow
# the creation of duplicate records but lets cover it anyways
if len(records) > 1:
@@ -569,7 +568,7 @@ class CloudflareAPI(object):
if len(records) == 1:
cur_record = records[0]
do_update = False
- if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl'] ):
+ if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
do_update = True
if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
do_update = True
@@ -582,53 +581,54 @@ class CloudflareAPI(object):
if self.module.check_mode:
result = new_record
else:
- result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id,records[0]['id']),'PUT',new_record)
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
self.changed = True
- return result,self.changed
+ return result, self.changed
else:
- return records,self.changed
+ return records, self.changed
if self.module.check_mode:
result = new_record
else:
- result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id),'POST',new_record)
+ result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
self.changed = True
- return result,self.changed
+ return result, self.changed
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- account_api_token = dict(required=True, no_log=True, type='str'),
- account_email = dict(required=True, type='str'),
- port = dict(required=False, default=None, type='int'),
- priority = dict(required=False, default=1, type='int'),
- proto = dict(required=False, default=None, choices=[ 'tcp', 'udp' ], type='str'),
- proxied = dict(required=False, default=False, type='bool'),
- record = dict(required=False, default='@', aliases=['name'], type='str'),
- service = dict(required=False, default=None, type='str'),
- solo = dict(required=False, default=None, type='bool'),
- state = dict(required=False, default='present', choices=['present', 'absent'], type='str'),
- timeout = dict(required=False, default=30, type='int'),
- ttl = dict(required=False, default=1, type='int'),
- type = dict(required=False, default=None, choices=[ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF' ], type='str'),
- value = dict(required=False, default=None, aliases=['content'], type='str'),
- weight = dict(required=False, default=1, type='int'),
- zone = dict(required=True, default=None, aliases=['domain'], type='str'),
+ argument_spec=dict(
+ account_api_token=dict(required=True, no_log=True, type='str'),
+ account_email=dict(required=True, type='str'),
+ port=dict(required=False, default=None, type='int'),
+ priority=dict(required=False, default=1, type='int'),
+ proto=dict(required=False, default=None, choices=['tcp', 'udp'], type='str'),
+ proxied=dict(required=False, default=False, type='bool'),
+ record=dict(required=False, default='@', aliases=['name'], type='str'),
+ service=dict(required=False, default=None, type='str'),
+ solo=dict(required=False, default=None, type='bool'),
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ timeout=dict(required=False, default=30, type='int'),
+ ttl=dict(required=False, default=1, type='int'),
+ type=dict(required=False, default=None, choices=['A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF'], type='str'),
+ value=dict(required=False, default=None, aliases=['content'], type='str'),
+ weight=dict(required=False, default=1, type='int'),
+ zone=dict(required=True, default=None, aliases=['domain'], type='str'),
),
- supports_check_mode = True,
- required_if = ([
- ('state','present',['record','type']),
- ('type','MX',['priority','value']),
- ('type','SRV',['port','priority','proto','service','value','weight']),
- ('type','A',['value']),
- ('type','AAAA',['value']),
- ('type','CNAME',['value']),
- ('type','TXT',['value']),
- ('type','NS',['value']),
- ('type','SPF',['value'])
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['record', 'type']),
+ ('type', 'MX', ['priority', 'value']),
+ ('type', 'SRV', ['port', 'priority', 'proto', 'service', 'value', 'weight']),
+ ('type', 'A', ['value']),
+ ('type', 'AAAA', ['value']),
+ ('type', 'CNAME', ['value']),
+ ('type', 'TXT', ['value']),
+ ('type', 'NS', ['value']),
+ ('type', 'SPF', ['value'])
]
),
- required_one_of = (
- [['record','value','type']]
+ required_one_of=(
+ [['record', 'value', 'type']]
)
)
@@ -645,11 +645,11 @@ def main():
# delete all records matching record name + type
if cf_api.is_solo:
changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
- result,changed = cf_api.ensure_dns_record()
- if isinstance(result,list):
- module.exit_json(changed=changed,result={'record': result[0]})
+ result, changed = cf_api.ensure_dns_record()
+ if isinstance(result, list):
+ module.exit_json(changed=changed, result={'record': result[0]})
else:
- module.exit_json(changed=changed,result={'record': result})
+ module.exit_json(changed=changed, result={'record': result})
else:
# force solo to False, just to be sure
changed = cf_api.delete_dns_records(solo=False)
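The cloudflare_dns hunks are dominated by adding a space after commas in calls, lists, and dict literals, and by dropping the spaces around `=` in the `argument_spec` entries. A short runnable sketch of the resulting style; the helper names below are invented stand-ins, not the module's API.

# Illustration of the comma- and keyword-spacing style the hunks above apply;
# the Cloudflare-ish names below are invented stand-ins, not the module's API.


def build_headers(email, token):
    # Dict literals: a space after ':' and ',' but none before them.
    return {'X-Auth-Email': email,
            'X-Auth-Key': token,
            'Content-Type': 'application/json'}


def ensure_record(record, value, ttl=1, proxied=False):
    # Keyword defaults without spaces around '=', commas followed by a space.
    changed = value is not None
    result = {'record': record, 'value': value, 'ttl': ttl, 'proxied': proxied}
    return result, changed


if __name__ == '__main__':
    headers = build_headers('admin@example.com', 'not-a-real-token')
    result, changed = ensure_record('www.example.com', '203.0.113.10', ttl=300)
    print(headers, result, changed)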
diff --git a/lib/ansible/modules/net_tools/dnsimple.py b/lib/ansible/modules/net_tools/dnsimple.py
index 8dfdcaed86..5739ee8a04 100644
--- a/lib/ansible/modules/net_tools/dnsimple.py
+++ b/lib/ansible/modules/net_tools/dnsimple.py
@@ -188,26 +188,26 @@ def main():
state=dict(required=False, choices=['present', 'absent']),
solo=dict(required=False, type='bool'),
),
- required_together = (
+ required_together=(
['record', 'value']
),
- supports_check_mode = True,
+ supports_check_mode=True,
)
if not HAS_DNSIMPLE:
module.fail_json(msg="dnsimple required for this module")
- account_email = module.params.get('account_email')
+ account_email = module.params.get('account_email')
account_api_token = module.params.get('account_api_token')
- domain = module.params.get('domain')
- record = module.params.get('record')
- record_ids = module.params.get('record_ids')
- record_type = module.params.get('type')
- ttl = module.params.get('ttl')
- value = module.params.get('value')
- priority = module.params.get('priority')
- state = module.params.get('state')
- is_solo = module.params.get('solo')
+ domain = module.params.get('domain')
+ record = module.params.get('record')
+ record_ids = module.params.get('record_ids')
+ record_type = module.params.get('type')
+ ttl = module.params.get('ttl')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ state = module.params.get('state')
+ is_solo = module.params.get('solo')
if account_email and account_api_token:
client = DNSimple(email=account_email, api_token=account_api_token)
@@ -278,7 +278,7 @@ def main():
if rr['ttl'] != ttl or rr['prio'] != priority:
data = {}
if ttl:
- data['ttl'] = ttl
+ data['ttl'] = ttl
if priority:
data['prio'] = priority
if module.check_mode:
@@ -290,12 +290,12 @@ def main():
else:
# create it
data = {
- 'name': record,
+ 'name': record,
'record_type': record_type,
- 'content': value,
+ 'content': value,
}
if ttl:
- data['ttl'] = ttl
+ data['ttl'] = ttl
if priority:
data['prio'] = priority
if module.check_mode:
@@ -315,7 +315,7 @@ def main():
# Make sure these record_ids either all exist or none
if domain and record_ids:
current_records = [str(r['record']['id']) for r in client.records(str(domain))]
- wanted_records = [str(r) for r in record_ids]
+ wanted_records = [str(r) for r in record_ids]
if state == 'present':
difference = list(set(wanted_records) - set(current_records))
if difference:
diff --git a/lib/ansible/modules/net_tools/dnsmadeeasy.py b/lib/ansible/modules/net_tools/dnsmadeeasy.py
index 57bf8215bc..f560f32d87 100644
--- a/lib/ansible/modules/net_tools/dnsmadeeasy.py
+++ b/lib/ansible/modules/net_tools/dnsmadeeasy.py
@@ -401,7 +401,7 @@ class DME2(object):
self.record_map = None # ["record_name"] => ID
self.records = None # ["record_ID"] => <record>
self.all_records = None
- self.contactList_map = None # ["contactList_name"] => ID
+ self.contactList_map = None # ["contactList_name"] => ID
# Lookup the domain ID if passed as a domain name vs. ID
if not self.domain.isdigit():
@@ -551,6 +551,7 @@ class DME2(object):
# Module execution.
#
+
def main():
module = AnsibleModule(
@@ -581,7 +582,7 @@ def main():
ip3=dict(required=False),
ip4=dict(required=False),
ip5=dict(required=False),
- validate_certs = dict(default='yes', type='bool'),
+ validate_certs=dict(default='yes', type='bool'),
),
required_together=(
['record_value', 'record_ttl', 'record_type']
diff --git a/lib/ansible/modules/net_tools/ipinfoio_facts.py b/lib/ansible/modules/net_tools/ipinfoio_facts.py
index 97e8f82a26..f1e9efae1d 100644
--- a/lib/ansible/modules/net_tools/ipinfoio_facts.py
+++ b/lib/ansible/modules/net_tools/ipinfoio_facts.py
@@ -96,7 +96,7 @@ class IpinfoioFacts(object):
self.module = module
def get_geo_data(self):
- response, info = fetch_url(self.module, self.url, force=True, # NOQA
+ response, info = fetch_url(self.module, self.url, force=True, # NOQA
timeout=self.timeout)
try:
info['status'] == 200
@@ -116,7 +116,7 @@ class IpinfoioFacts(object):
def main():
- module = AnsibleModule( # NOQA
+ module = AnsibleModule( # NOQA
argument_spec=dict(
http_agent=dict(default=USER_AGENT),
timeout=dict(type='int', default=10),
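# Editorial sketch, not part of the patch above: ipinfoio_facts wraps fetch_url()
# and checks for a 200 status before parsing JSON. Roughly the same lookup using
# only the standard library; the endpoint URL and user-agent string here are
# illustrative assumptions, not taken from the module:
import json
from urllib.request import Request, urlopen


def get_geo_data(url='https://ipinfo.io/json', timeout=10):
    req = Request(url, headers={'User-Agent': 'ipinfoio-example/0.1'})
    with urlopen(req, timeout=timeout) as response:
        if response.getcode() != 200:      # mirror the module's status check
            return None
        return json.loads(response.read().decode('utf-8'))


if __name__ == '__main__':
    print(get_geo_data())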
diff --git a/lib/ansible/modules/net_tools/nmcli.py b/lib/ansible/modules/net_tools/nmcli.py
index 702fb17684..8b71d603de 100644
--- a/lib/ansible/modules/net_tools/nmcli.py
+++ b/lib/ansible/modules/net_tools/nmcli.py
@@ -13,7 +13,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community'}
-DOCUMENTATION='''
+DOCUMENTATION = '''
---
module: nmcli
author: "Chris Long (@alcamie101)"
@@ -208,7 +208,7 @@ options:
'''
-EXAMPLES='''
+EXAMPLES = '''
# These examples are using the following inventory:
#
# ## Directory layout:
@@ -482,17 +482,17 @@ EXAMPLES='''
# - 10 Connection, device, or access point does not exist.
'''
-HAVE_DBUS=False
+HAVE_DBUS = False
try:
import dbus
- HAVE_DBUS=True
+ HAVE_DBUS = True
except ImportError:
pass
-HAVE_NM_CLIENT=False
+HAVE_NM_CLIENT = False
try:
from gi.repository import NetworkManager, NMClient
- HAVE_NM_CLIENT=True
+ HAVE_NM_CLIENT = True
except ImportError:
pass
@@ -512,75 +512,74 @@ class Nmcli(object):
All subclasses MUST define platform and distribution (which may be None).
"""
- platform='Generic'
- distribution=None
+ platform = 'Generic'
+ distribution = None
if HAVE_DBUS:
- bus=dbus.SystemBus()
+ bus = dbus.SystemBus()
# The following is going to be used in dbus code
- DEVTYPES={1: "Ethernet",
- 2: "Wi-Fi",
- 5: "Bluetooth",
- 6: "OLPC",
- 7: "WiMAX",
- 8: "Modem",
- 9: "InfiniBand",
- 10: "Bond",
- 11: "VLAN",
- 12: "ADSL",
- 13: "Bridge",
- 14: "Generic",
- 15: "Team"
+ DEVTYPES = {1: "Ethernet",
+ 2: "Wi-Fi",
+ 5: "Bluetooth",
+ 6: "OLPC",
+ 7: "WiMAX",
+ 8: "Modem",
+ 9: "InfiniBand",
+ 10: "Bond",
+ 11: "VLAN",
+ 12: "ADSL",
+ 13: "Bridge",
+ 14: "Generic",
+ 15: "Team"
}
- STATES={0: "Unknown",
- 10: "Unmanaged",
- 20: "Unavailable",
- 30: "Disconnected",
- 40: "Prepare",
- 50: "Config",
- 60: "Need Auth",
- 70: "IP Config",
- 80: "IP Check",
- 90: "Secondaries",
- 100: "Activated",
- 110: "Deactivating",
- 120: "Failed"
- }
-
+ STATES = {0: "Unknown",
+ 10: "Unmanaged",
+ 20: "Unavailable",
+ 30: "Disconnected",
+ 40: "Prepare",
+ 50: "Config",
+ 60: "Need Auth",
+ 70: "IP Config",
+ 80: "IP Check",
+ 90: "Secondaries",
+ 100: "Activated",
+ 110: "Deactivating",
+ 120: "Failed"
+ }
def __init__(self, module):
- self.module=module
- self.state=module.params['state']
- self.autoconnect=module.params['autoconnect']
- self.conn_name=module.params['conn_name']
- self.master=module.params['master']
- self.ifname=module.params['ifname']
- self.type=module.params['type']
- self.ip4=module.params['ip4']
- self.gw4=module.params['gw4']
- self.dns4=' '.join(module.params['dns4'])
- self.ip6=module.params['ip6']
- self.gw6=module.params['gw6']
- self.dns6=module.params['dns6']
- self.mtu=module.params['mtu']
- self.stp=module.params['stp']
- self.priority=module.params['priority']
- self.mode=module.params['mode']
- self.miimon=module.params['miimon']
- self.downdelay=module.params['downdelay']
- self.updelay=module.params['updelay']
- self.arp_interval=module.params['arp_interval']
- self.arp_ip_target=module.params['arp_ip_target']
- self.slavepriority=module.params['slavepriority']
- self.forwarddelay=module.params['forwarddelay']
- self.hellotime=module.params['hellotime']
- self.maxage=module.params['maxage']
- self.ageingtime=module.params['ageingtime']
- self.mac=module.params['mac']
- self.vlanid=module.params['vlanid']
- self.vlandev=module.params['vlandev']
- self.flags=module.params['flags']
- self.ingress=module.params['ingress']
- self.egress=module.params['egress']
+ self.module = module
+ self.state = module.params['state']
+ self.autoconnect = module.params['autoconnect']
+ self.conn_name = module.params['conn_name']
+ self.master = module.params['master']
+ self.ifname = module.params['ifname']
+ self.type = module.params['type']
+ self.ip4 = module.params['ip4']
+ self.gw4 = module.params['gw4']
+ self.dns4 = ' '.join(module.params['dns4'])
+ self.ip6 = module.params['ip6']
+ self.gw6 = module.params['gw6']
+ self.dns6 = module.params['dns6']
+ self.mtu = module.params['mtu']
+ self.stp = module.params['stp']
+ self.priority = module.params['priority']
+ self.mode = module.params['mode']
+ self.miimon = module.params['miimon']
+ self.downdelay = module.params['downdelay']
+ self.updelay = module.params['updelay']
+ self.arp_interval = module.params['arp_interval']
+ self.arp_ip_target = module.params['arp_ip_target']
+ self.slavepriority = module.params['slavepriority']
+ self.forwarddelay = module.params['forwarddelay']
+ self.hellotime = module.params['hellotime']
+ self.maxage = module.params['maxage']
+ self.ageingtime = module.params['ageingtime']
+ self.mac = module.params['mac']
+ self.vlanid = module.params['vlanid']
+ self.vlandev = module.params['vlandev']
+ self.flags = module.params['flags']
+ self.ingress = module.params['ingress']
+ self.egress = module.params['egress']
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
@@ -589,41 +588,41 @@ class Nmcli(object):
try:
# returns a dict of dicts mapping name::setting, where setting is a dict
# mapping key::value. Each member of the 'setting' dict is a secret
- secrets=proxy.GetSecrets(setting_name)
+ secrets = proxy.GetSecrets(setting_name)
# Copy the secrets into our connection config
for setting in secrets:
for key in secrets[setting]:
- config[setting_name][key]=secrets[setting][key]
+ config[setting_name][key] = secrets[setting][key]
except:
pass
def dict_to_string(self, d):
# Try to trivially translate a dictionary's elements into nice string
# formatting.
- dstr=""
+ dstr = ""
for key in d:
- val=d[key]
- str_val=""
- add_string=True
+ val = d[key]
+ str_val = ""
+ add_string = True
if isinstance(val, dbus.Array):
for elt in val:
if isinstance(elt, dbus.Byte):
- str_val+="%s " % int(elt)
+ str_val += "%s " % int(elt)
elif isinstance(elt, dbus.String):
- str_val+="%s" % elt
+ str_val += "%s" % elt
elif isinstance(val, dbus.Dictionary):
- dstr+=self.dict_to_string(val)
- add_string=False
+ dstr += self.dict_to_string(val)
+ add_string = False
else:
- str_val=val
+ str_val = val
if add_string:
- dstr+="%s: %s\n" % ( key, str_val)
+ dstr += "%s: %s\n" % (key, str_val)
return dstr
def connection_to_string(self, config):
# dump a connection configuration to use in list_connection_info
- setting_list=[]
+ setting_list = []
for setting_name in config:
setting_list.append(self.dict_to_string(config[setting_name]))
return setting_list
@@ -637,18 +636,18 @@ class Nmcli(object):
def list_connection_info(self):
# Ask the settings service for the list of connections it provides
- bus=dbus.SystemBus()
+ bus = dbus.SystemBus()
- service_name="org.freedesktop.NetworkManager"
- proxy=bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings")
- settings=dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
- connection_paths=settings.ListConnections()
- connection_list=[]
+ service_name = "org.freedesktop.NetworkManager"
+ proxy = bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings")
+ settings = dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
+ connection_paths = settings.ListConnections()
+ connection_list = []
# List each connection's name, UUID, and type
for path in connection_paths:
- con_proxy=bus.get_object(service_name, path)
- settings_connection=dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
- config=settings_connection.GetSettings()
+ con_proxy = bus.get_object(service_name, path)
+ settings_connection = dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
+ config = settings_connection.GetSettings()
# Now get secrets too; we grab the secrets for each type of connection
# (since there isn't a "get all secrets" call because most of the time
@@ -662,7 +661,7 @@ class Nmcli(object):
self.merge_secrets(settings_connection, config, 'ppp')
# Get the details of the 'connection' setting
- s_con=config['connection']
+ s_con = config['connection']
connection_list.append(s_con['id'])
connection_list.append(s_con['uuid'])
connection_list.append(s_con['type'])
@@ -671,14 +670,14 @@ class Nmcli(object):
def connection_exists(self):
# we are going to use name and type in this instance to find if that connection exists and is of type x
- connections=self.list_connection_info()
+ connections = self.list_connection_info()
for con_item in connections:
- if self.conn_name==con_item:
+ if self.conn_name == con_item:
return True
def down_connection(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# if self.connection_exists():
cmd.append('con')
cmd.append('down')
@@ -686,14 +685,14 @@ class Nmcli(object):
return self.execute_command(cmd)
def up_connection(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('up')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def create_connection_team(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for creating team interface
cmd.append('con')
cmd.append('add')
@@ -727,7 +726,7 @@ class Nmcli(object):
return cmd
def modify_connection_team(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for modifying team interface
cmd.append('con')
cmd.append('mod')
@@ -757,7 +756,7 @@ class Nmcli(object):
return cmd
def create_connection_team_slave(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for creating team-slave interface
cmd.append('connection')
cmd.append('add')
@@ -782,7 +781,7 @@ class Nmcli(object):
return cmd
def modify_connection_team_slave(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for modifying team-slave interface
cmd.append('con')
cmd.append('mod')
@@ -795,7 +794,7 @@ class Nmcli(object):
return cmd
def create_connection_bond(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for creating bond interface
cmd.append('con')
cmd.append('add')
@@ -847,7 +846,7 @@ class Nmcli(object):
return cmd
def modify_connection_bond(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for modifying bond interface
cmd.append('con')
cmd.append('mod')
@@ -876,7 +875,7 @@ class Nmcli(object):
return cmd
def create_connection_bond_slave(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for creating bond-slave interface
cmd.append('connection')
cmd.append('add')
@@ -898,7 +897,7 @@ class Nmcli(object):
return cmd
def modify_connection_bond_slave(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for modifying bond-slave interface
cmd.append('con')
cmd.append('mod')
@@ -908,7 +907,7 @@ class Nmcli(object):
return cmd
def create_connection_ethernet(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
@@ -945,7 +944,7 @@ class Nmcli(object):
return cmd
def modify_connection_ethernet(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
@@ -980,110 +979,110 @@ class Nmcli(object):
return cmd
def create_connection_bridge(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for creating bridge interface
return cmd
def modify_connection_bridge(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for modifying bridge interface
return cmd
def create_connection_vlan(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
return cmd
def modify_connection_vlan(self):
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
return cmd
def create_connection(self):
- cmd=[]
- if self.type=='team':
+ cmd = []
+ if self.type == 'team':
# cmd=self.create_connection_team()
if (self.dns4 is not None) or (self.dns6 is not None):
- cmd=self.create_connection_team()
+ cmd = self.create_connection_team()
self.execute_command(cmd)
- cmd=self.modify_connection_team()
+ cmd = self.modify_connection_team()
self.execute_command(cmd)
- cmd=self.up_connection()
+ cmd = self.up_connection()
return self.execute_command(cmd)
elif (self.dns4 is None) or (self.dns6 is None):
- cmd=self.create_connection_team()
+ cmd = self.create_connection_team()
return self.execute_command(cmd)
- elif self.type=='team-slave':
+ elif self.type == 'team-slave':
if self.mtu is not None:
- cmd=self.create_connection_team_slave()
+ cmd = self.create_connection_team_slave()
self.execute_command(cmd)
- cmd=self.modify_connection_team_slave()
+ cmd = self.modify_connection_team_slave()
self.execute_command(cmd)
# cmd=self.up_connection()
return self.execute_command(cmd)
else:
- cmd=self.create_connection_team_slave()
+ cmd = self.create_connection_team_slave()
return self.execute_command(cmd)
- elif self.type=='bond':
+ elif self.type == 'bond':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
- cmd=self.create_connection_bond()
+ cmd = self.create_connection_bond()
self.execute_command(cmd)
- cmd=self.modify_connection_bond()
+ cmd = self.modify_connection_bond()
self.execute_command(cmd)
- cmd=self.up_connection()
+ cmd = self.up_connection()
return self.execute_command(cmd)
else:
- cmd=self.create_connection_bond()
+ cmd = self.create_connection_bond()
return self.execute_command(cmd)
- elif self.type=='bond-slave':
- cmd=self.create_connection_bond_slave()
- elif self.type=='ethernet':
+ elif self.type == 'bond-slave':
+ cmd = self.create_connection_bond_slave()
+ elif self.type == 'ethernet':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
- cmd=self.create_connection_ethernet()
+ cmd = self.create_connection_ethernet()
self.execute_command(cmd)
- cmd=self.modify_connection_ethernet()
+ cmd = self.modify_connection_ethernet()
self.execute_command(cmd)
- cmd=self.up_connection()
+ cmd = self.up_connection()
return self.execute_command(cmd)
else:
- cmd=self.create_connection_ethernet()
+ cmd = self.create_connection_ethernet()
return self.execute_command(cmd)
- elif self.type=='bridge':
- cmd=self.create_connection_bridge()
- elif self.type=='vlan':
- cmd=self.create_connection_vlan()
+ elif self.type == 'bridge':
+ cmd = self.create_connection_bridge()
+ elif self.type == 'vlan':
+ cmd = self.create_connection_vlan()
return self.execute_command(cmd)
def remove_connection(self):
# self.down_connection()
- cmd=[self.module.get_bin_path('nmcli', True)]
+ cmd = [self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('del')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def modify_connection(self):
- cmd=[]
- if self.type=='team':
- cmd=self.modify_connection_team()
- elif self.type=='team-slave':
- cmd=self.modify_connection_team_slave()
- elif self.type=='bond':
- cmd=self.modify_connection_bond()
- elif self.type=='bond-slave':
- cmd=self.modify_connection_bond_slave()
- elif self.type=='ethernet':
- cmd=self.modify_connection_ethernet()
- elif self.type=='bridge':
- cmd=self.modify_connection_bridge()
- elif self.type=='vlan':
- cmd=self.modify_connection_vlan()
+ cmd = []
+ if self.type == 'team':
+ cmd = self.modify_connection_team()
+ elif self.type == 'team-slave':
+ cmd = self.modify_connection_team_slave()
+ elif self.type == 'bond':
+ cmd = self.modify_connection_bond()
+ elif self.type == 'bond-slave':
+ cmd = self.modify_connection_bond_slave()
+ elif self.type == 'ethernet':
+ cmd = self.modify_connection_ethernet()
+ elif self.type == 'bridge':
+ cmd = self.modify_connection_bridge()
+ elif self.type == 'vlan':
+ cmd = self.modify_connection_vlan()
return self.execute_command(cmd)
def main():
# Parsing argument file
- module=AnsibleModule(
+ module = AnsibleModule(
argument_spec=dict(
autoconnect=dict(required=False, default=None, type='bool'),
state=dict(required=True, choices=['present', 'absent'], type='str'),
@@ -1132,57 +1131,57 @@ def main():
if not HAVE_NM_CLIENT:
module.fail_json(msg="This module requires NetworkManager glib API")
- nmcli=Nmcli(module)
+ nmcli = Nmcli(module)
- rc=None
- out=''
- err=''
- result={}
- result['conn_name']=nmcli.conn_name
- result['state']=nmcli.state
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['conn_name'] = nmcli.conn_name
+ result['state'] = nmcli.state
# check for issues
if nmcli.conn_name is None:
nmcli.module.fail_json(msg="You haven't specified a name for the connection")
# team-slave checks
- if nmcli.type=='team-slave' and nmcli.master is None:
+ if nmcli.type == 'team-slave' and nmcli.master is None:
nmcli.module.fail_json(msg="You haven't specified a name for the master so we're not changing a thing")
- if nmcli.type=='team-slave' and nmcli.ifname is None:
+ if nmcli.type == 'team-slave' and nmcli.ifname is None:
nmcli.module.fail_json(msg="You haven't specified a name for the connection")
- if nmcli.state=='absent':
+ if nmcli.state == 'absent':
if nmcli.connection_exists():
if module.check_mode:
module.exit_json(changed=True)
- (rc, out, err)=nmcli.down_connection()
- (rc, out, err)=nmcli.remove_connection()
- if rc!=0:
- module.fail_json(name =('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+ (rc, out, err) = nmcli.down_connection()
+ (rc, out, err) = nmcli.remove_connection()
+ if rc != 0:
+ module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
- elif nmcli.state=='present':
+ elif nmcli.state == 'present':
if nmcli.connection_exists():
# modify connection (note: this function is check mode aware)
# result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
- result['Exists']='Connections do exist so we are modifying them'
+ result['Exists'] = 'Connections do exist so we are modifying them'
if module.check_mode:
module.exit_json(changed=True)
- (rc, out, err)=nmcli.modify_connection()
+ (rc, out, err) = nmcli.modify_connection()
if not nmcli.connection_exists():
- result['Connection']=('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
+ result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
if module.check_mode:
module.exit_json(changed=True)
- (rc, out, err)=nmcli.create_connection()
- if rc is not None and rc!=0:
+ (rc, out, err) = nmcli.create_connection()
+ if rc is not None and rc != 0:
module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
if rc is None:
- result['changed']=False
+ result['changed'] = False
else:
- result['changed']=True
+ result['changed'] = True
if out:
- result['stdout']=out
+ result['stdout'] = out
if err:
- result['stderr']=err
+ result['stderr'] = err
module.exit_json(**result)
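# Editorial sketch, not part of the patch above: the Nmcli class assembles nmcli
# argument lists and hands them to module.run_command(). A stripped-down version
# of that pattern with subprocess; the command layout follows the module's own
# example ("nmcli con add type ethernet con-name my-eth1 ifname eth1 ip4 ..."),
# but this is not a full re-creation of create_connection_ethernet():
import subprocess


def build_ethernet_add_cmd(conn_name, ifname, ip4=None, gw4=None, nmcli_bin='nmcli'):
    cmd = [nmcli_bin, 'con', 'add', 'type', 'ethernet',
           'con-name', conn_name, 'ifname', ifname]
    if ip4:
        cmd += ['ip4', ip4]
    if gw4:
        cmd += ['gw4', gw4]
    return cmd


def execute_command(cmd):
    # analogous to module.run_command(): returns (rc, stdout, stderr)
    proc = subprocess.run(cmd, capture_output=True, text=True)
    return proc.returncode, proc.stdout, proc.stderr


if __name__ == '__main__':
    print(build_ethernet_add_cmd('my-eth1', 'eth1', ip4='192.0.2.100/24', gw4='192.0.2.1'))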
diff --git a/lib/ansible/modules/net_tools/omapi_host.py b/lib/ansible/modules/net_tools/omapi_host.py
index 468296dfa7..e6f60a00ee 100644
--- a/lib/ansible/modules/net_tools/omapi_host.py
+++ b/lib/ansible/modules/net_tools/omapi_host.py
@@ -234,7 +234,7 @@ class OmapiHostManager:
fields_to_update = {}
if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
- unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
+ unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])
# Name cannot be changed
diff --git a/lib/ansible/modules/net_tools/snmp_facts.py b/lib/ansible/modules/net_tools/snmp_facts.py
index 8a89f9f806..84c905dcf6 100644
--- a/lib/ansible/modules/net_tools/snmp_facts.py
+++ b/lib/ansible/modules/net_tools/snmp_facts.py
@@ -102,32 +102,32 @@ from ansible.module_utils._text import to_text
class DefineOid(object):
- def __init__(self,dotprefix=False):
+ def __init__(self, dotprefix=False):
if dotprefix:
dp = "."
else:
dp = ""
# From SNMPv2-MIB
- self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
+ self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
- self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
- self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
- self.sysName = dp + "1.3.6.1.2.1.1.5.0"
+ self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
+ self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
+ self.sysName = dp + "1.3.6.1.2.1.1.5.0"
self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
# From IF-MIB
- self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
- self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
- self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
- self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
+ self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
+ self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
+ self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
+ self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
- self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
- self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
+ self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
+ self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
# From IP-MIB
- self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
+ self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
@@ -141,6 +141,7 @@ def decode_hex(hexstring):
else:
return hexstring
+
def decode_mac(hexstring):
if len(hexstring) != 14:
@@ -150,17 +151,19 @@ def decode_mac(hexstring):
else:
return hexstring
+
def lookup_adminstatus(int_adminstatus):
adminstatus_options = {
1: 'up',
2: 'down',
3: 'testing'
- }
+ }
if int_adminstatus in adminstatus_options:
return adminstatus_options[int_adminstatus]
else:
return ""
+
def lookup_operstatus(int_operstatus):
operstatus_options = {
1: 'up',
@@ -170,12 +173,13 @@ def lookup_operstatus(int_operstatus):
5: 'dormant',
6: 'notPresent',
7: 'lowerLayerDown'
- }
+ }
if int_operstatus in operstatus_options:
return operstatus_options[int_operstatus]
else:
return ""
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -189,7 +193,7 @@ def main():
authkey=dict(required=False),
privkey=dict(required=False),
removeplaceholder=dict(required=False)),
- required_together = ( ['username','level','integrity','authkey'],['privacy','privkey'],),
+ required_together=(['username', 'level', 'integrity', 'authkey'], ['privacy', 'privkey'],),
supports_check_mode=False)
m_args = module.params
@@ -211,7 +215,6 @@ def main():
if m_args['level'] == "authPriv" and m_args['privacy'] is None:
module.fail_json(msg='Privacy algorithm not set when using authPriv')
-
if m_args['integrity'] == "sha":
integrity_proto = cmdgen.usmHMACSHAAuthProtocol
elif m_args['integrity'] == "md5":
@@ -240,7 +243,7 @@ def main():
# Use v without a prefix to use with return values
v = DefineOid(dotprefix=False)
- Tree = lambda: defaultdict(Tree)
+ def Tree(): return defaultdict(Tree)
results = Tree()
@@ -256,7 +259,6 @@ def main():
lookupMib=False
)
-
if errorIndication:
module.fail_json(msg=str(errorIndication))
@@ -294,7 +296,6 @@ def main():
lookupMib=False
)
-
if errorIndication:
module.fail_json(msg=str(errorIndication))
@@ -351,9 +352,9 @@ def main():
for ipv4_network in ipv4_networks:
current_interface = ipv4_networks[ipv4_network]['interface']
current_network = {
- 'address': ipv4_networks[ipv4_network]['address'],
- 'netmask': ipv4_networks[ipv4_network]['netmask']
- }
+ 'address': ipv4_networks[ipv4_network]['address'],
+ 'netmask': ipv4_networks[ipv4_network]['netmask']
+ }
if not current_interface in interface_to_ipv4:
interface_to_ipv4[current_interface] = []
interface_to_ipv4[current_interface].append(current_network)
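# Editorial sketch, not part of the patch above: the snmp_facts hunk replaces
# "Tree = lambda: defaultdict(Tree)" with a def of the same name (PEP 8 E731).
# Both forms build an autovivifying nested dict, because the name Tree is looked
# up at call time; a quick demonstration:
from collections import defaultdict


def Tree():
    return defaultdict(Tree)


if __name__ == '__main__':
    results = Tree()
    results['ansible_interfaces'][1]['name'] = 'eth0'   # intermediate keys appear on demand
    print(results['ansible_interfaces'][1]['name'])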
diff --git a/lib/ansible/modules/network/a10/a10_server_axapi3.py b/lib/ansible/modules/network/a10/a10_server_axapi3.py
index 7fe6accbf2..64b5a3540d 100644
--- a/lib/ansible/modules/network/a10/a10_server_axapi3.py
+++ b/lib/ansible/modules/network/a10/a10_server_axapi3.py
@@ -171,7 +171,6 @@ def main():
# validate the ports data structure
validate_ports(module, slb_server_ports)
-
json_post = {
"server-list": [
{
@@ -188,7 +187,7 @@ def main():
if slb_server_status:
json_post['server-list'][0]['action'] = slb_server_status
- slb_server_data = axapi_call_v3(module, axapi_base_url+'slb/server/', method='GET', body='', signature=signature)
+ slb_server_data = axapi_call_v3(module, axapi_base_url + 'slb/server/', method='GET', body='', signature=signature)
# for empty slb server list
if axapi_failure(slb_server_data):
@@ -203,7 +202,7 @@ def main():
changed = False
if operation == 'create':
if slb_server_exists is False:
- result = axapi_call_v3(module, axapi_base_url+'slb/server/', method='POST', body=json.dumps(json_post), signature=signature)
+ result = axapi_call_v3(module, axapi_base_url + 'slb/server/', method='POST', body=json.dumps(json_post), signature=signature)
if axapi_failure(result):
module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg'])
changed = True
@@ -234,7 +233,7 @@ def main():
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
- write_result = axapi_call_v3(module, axapi_base_url+'write/memory/', method='POST', body='', signature=signature)
+ write_result = axapi_call_v3(module, axapi_base_url + 'write/memory/', method='POST', body='', signature=signature)
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
diff --git a/lib/ansible/modules/network/a10/a10_virtual_server.py b/lib/ansible/modules/network/a10/a10_virtual_server.py
index 560f16a8c2..c007a9128a 100644
--- a/lib/ansible/modules/network/a10/a10_virtual_server.py
+++ b/lib/ansible/modules/network/a10/a10_virtual_server.py
@@ -102,7 +102,7 @@ content:
import json
from ansible.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure,
- axapi_enabled_disabled, axapi_get_vport_protocol, AXAPI_VPORT_PROTOCOLS)
+ axapi_enabled_disabled, axapi_get_vport_protocol, AXAPI_VPORT_PROTOCOLS)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
@@ -146,6 +146,7 @@ def validate_ports(module, ports):
if 'service_group' not in item:
item['service_group'] = ''
+
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
diff --git a/lib/ansible/modules/network/aos/aos_asn_pool.py b/lib/ansible/modules/network/aos/aos_asn_pool.py
index d204292d2f..1fbc9cd806 100644
--- a/lib/ansible/modules/network/aos/aos_asn_pool.py
+++ b/lib/ansible/modules/network/aos/aos_asn_pool.py
@@ -142,18 +142,19 @@ import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
+
def check_ranges_are_valid(module, ranges):
i = 1
for range in ranges:
- if not isinstance(range, list) :
+ if not isinstance(range, list):
module.fail_json(msg="Range (%i) must be a list not %s" % (i, type(range)))
elif len(range) != 2:
module.fail_json(msg="Range (%i) must be a list of 2 members, not %i" % (i, len(range)))
- elif not isinstance( range[0], int ):
- module.fail_json(msg="1st element of range (%i) must be integer instead of %s " % (i,type(range[0])))
- elif not isinstance( range[1], int ):
- module.fail_json(msg="2nd element of range (%i) must be integer instead of %s " % (i,type(range[1])))
+ elif not isinstance(range[0], int):
+ module.fail_json(msg="1st element of range (%i) must be integer instead of %s " % (i, type(range[0])))
+ elif not isinstance(range[1], int):
+ module.fail_json(msg="2nd element of range (%i) must be integer instead of %s " % (i, type(range[1])))
elif range[1] <= range[0]:
module.fail_json(msg="2nd element of range (%i) must be bigger than 1st " % (i))
@@ -161,24 +162,26 @@ def check_ranges_are_valid(module, ranges):
return True
+
def get_list_of_range(asn_pool):
ranges = []
for range in asn_pool.value['ranges']:
- ranges.append([ range['first'], range['last']])
+ ranges.append([range['first'], range['last']])
return ranges
+
def create_new_asn_pool(asn_pool, name, ranges):
# Create value
datum = dict(display_name=name, ranges=[])
for range in ranges:
- datum['ranges'].append(dict(first=range[0],last=range[1]))
+ datum['ranges'].append(dict(first=range[0], last=range[1]))
asn_pool.datum = datum
- ## Write to AOS
+ # Write to AOS
return asn_pool.write()
@@ -190,7 +193,7 @@ def asn_pool_absent(module, aos, my_pool):
if my_pool.exists is False:
module.exit_json(changed=False, name=margs['name'], id='', value={})
- ## Check if object is currently in Use or Not
+ # Check if object is currently in Use or Not
# If in Use, return an error
if my_pool.value:
if my_pool.value['status'] != 'not_in_use':
@@ -205,10 +208,10 @@ def asn_pool_absent(module, aos, my_pool):
except:
module.fail_json(msg="An error occurred, while trying to delete the ASN Pool")
- module.exit_json( changed=True,
- name=my_pool.name,
- id=my_pool.id,
- value={} )
+ module.exit_json(changed=True,
+ name=my_pool.name,
+ id=my_pool.id,
+ value={})
def asn_pool_present(module, aos, my_pool):
@@ -236,10 +239,10 @@ def asn_pool_present(module, aos, my_pool):
except:
module.fail_json(msg="An error occurred while trying to create a new ASN Pool ")
- module.exit_json( changed=True,
- name=my_pool.name,
- id=my_pool.id,
- value=my_pool.value )
+ module.exit_json(changed=True,
+ name=my_pool.name,
+ id=my_pool.id,
+ value=my_pool.value)
# Currently only check if the pool exist or not
# if exist return change false
@@ -248,14 +251,16 @@ def asn_pool_present(module, aos, my_pool):
# if pool already exist, check if list of ASN is the same
# if same just return the object and report change false
# if set(get_list_of_range(my_pool)) == set(margs['ranges']):
- module.exit_json( changed=False,
- name=my_pool.name,
- id=my_pool.id,
- value=my_pool.value )
+ module.exit_json(changed=False,
+ name=my_pool.name,
+ id=my_pool.id,
+ value=my_pool.value)
# ########################################################
# Main Function
# ########################################################
+
+
def asn_pool(module):
margs = module.params
@@ -271,7 +276,7 @@ def asn_pool(module):
# Check ID / Name and Content
if margs['content'] is not None:
- content = content_to_dict(module, margs['content'] )
+ content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
@@ -293,8 +298,8 @@ def asn_pool(module):
# ----------------------------------------------------
try:
my_pool = find_collection_item(aos.AsnPools,
- item_name=item_name,
- item_id=item_id)
+ item_name=item_name,
+ item_id=item_id)
except:
module.fail_json(msg="Unable to find the IP Pool based on name or ID, something went wrong")
@@ -309,19 +314,20 @@ def asn_pool(module):
asn_pool_present(module, aos, my_pool)
+
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
- name=dict(required=False ),
- id=dict(required=False ),
+ name=dict(required=False),
+ id=dict(required=False),
content=dict(required=False, type="json"),
- state=dict( required=False,
- choices=['present', 'absent'],
- default="present"),
+ state=dict(required=False,
+ choices=['present', 'absent'],
+ default="present"),
ranges=dict(required=False, type="list", default=[])
),
- mutually_exclusive = [('name', 'id', 'content')],
+ mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/aos/aos_blueprint.py b/lib/ansible/modules/network/aos/aos_blueprint.py
index 7ae32043c1..f61f9ba203 100644
--- a/lib/ansible/modules/network/aos/aos_blueprint.py
+++ b/lib/ansible/modules/network/aos/aos_blueprint.py
@@ -136,6 +136,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, check_aos_version, find_collection_item
from ansible.module_utils.pycompat24 import get_exception
+
def create_blueprint(module, aos, name):
margs = module.params
@@ -152,7 +153,7 @@ def create_blueprint(module, aos, name):
exc = get_exception()
msg = "Unable to create blueprint: %s" % exc.message
if 'UNPROCESSABLE ENTITY' in exc.message:
- msg+= ' (likely missing dependencies)'
+ msg += ' (likely missing dependencies)'
module.fail_json(msg=msg)
@@ -177,6 +178,7 @@ def ensure_absent(module, aos, blueprint):
id=blueprint.id,
name=blueprint.name)
+
def ensure_present(module, aos, blueprint):
margs = module.params
@@ -211,13 +213,14 @@ def ensure_present(module, aos, blueprint):
module.exit_json(changed=True,
name=margs['name'])
+
def ensure_build_ready(module, aos, blueprint):
margs = module.params
if not blueprint.exists:
module.fail_json(msg='blueprint %s does not exist' % blueprint.name)
- if blueprint.await_build_ready(timeout=margs['timeout']*1000):
+ if blueprint.await_build_ready(timeout=margs['timeout'] * 1000):
module.exit_json(contents=blueprint.contents)
else:
module.fail_json(msg='blueprint %s has build errors',
@@ -247,8 +250,8 @@ def aos_blueprint(module):
#----------------------------------------------------
try:
my_blueprint = find_collection_item(aos.Blueprints,
- item_name=item_name,
- item_id=item_id)
+ item_name=item_name,
+ item_id=item_id)
except:
module.fail_json(msg="Unable to find the Blueprint based on name or ID, something went wrong")
@@ -273,7 +276,7 @@ def main():
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False),
- id=dict(required=False ),
+ id=dict(required=False),
state=dict(choices=[
'present', 'absent', 'build-ready'],
default='present'),
@@ -281,7 +284,7 @@ def main():
template=dict(required=False),
reference_arch=dict(required=False)
),
- mutually_exclusive = [('name', 'id')],
+ mutually_exclusive=[('name', 'id')],
required_one_of=[('name', 'id')],
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/aos/aos_blueprint_param.py b/lib/ansible/modules/network/aos/aos_blueprint_param.py
index a2417dbe0b..e5d1bd0b42 100644
--- a/lib/ansible/modules/network/aos/aos_blueprint_param.py
+++ b/lib/ansible/modules/network/aos/aos_blueprint_param.py
@@ -189,6 +189,7 @@ param_map_list = dict(
)
)
+
def get_collection_from_param_map(module, aos):
param_map = None
@@ -220,6 +221,7 @@ def get_collection_from_param_map(module, aos):
return None
+
def blueprint_param_present(module, aos, blueprint, param, param_value):
margs = module.params
@@ -278,6 +280,7 @@ def blueprint_param_absent(module, aos, blueprint, param, param_value):
name=param.name,
value=param.value)
+
def blueprint_param(module):
margs = module.params
@@ -295,15 +298,15 @@ def blueprint_param(module):
# --------------------------------------------------------------------
try:
blueprint = find_collection_item(aos.Blueprints,
- item_name=margs['blueprint'],
- item_id=margs['blueprint'])
+ item_name=margs['blueprint'],
+ item_id=margs['blueprint'])
except:
module.fail_json(msg="Unable to find the Blueprint based on name or ID, something went wrong")
if blueprint.exists is False:
module.fail_json(msg='Blueprint %s does not exist.\n'
- 'known blueprints are [%s]'%
- (margs['blueprint'],','.join(aos.Blueprints.names)))
+ 'known blueprints are [%s]' %
+ (margs['blueprint'], ','.join(aos.Blueprints.names)))
# --------------------------------------------------------------------
# If get_param_list is defined, build the list of supported parameters
@@ -316,8 +319,8 @@ def blueprint_param(module):
params_list[param] = blueprint.params[param].info
module.exit_json(changed=False,
- blueprint= blueprint.name,
- params_list=params_list )
+ blueprint=blueprint.name,
+ params_list=params_list)
# --------------------------------------------------------------------
# Check Param name, return an error if not supported by this blueprint
@@ -325,7 +328,7 @@ def blueprint_param(module):
if margs['name'] in blueprint.params.names:
param = blueprint.params[margs['name']]
else:
- module.fail_json(msg='unable to access param %s' % margs['name'] )
+ module.fail_json(msg='unable to access param %s' % margs['name'])
# --------------------------------------------------------------------
# Check if param_value needs to be converted to an object
@@ -350,6 +353,7 @@ def blueprint_param(module):
blueprint_param_present(module, aos, blueprint, param, param_value)
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -359,7 +363,7 @@ def main():
name=dict(required=False),
value=dict(required=False, type="dict"),
param_map=dict(required=False),
- state=dict( choices=['present', 'absent'], default='present')
+ state=dict(choices=['present', 'absent'], default='present')
),
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/aos/aos_blueprint_virtnet.py b/lib/ansible/modules/network/aos/aos_blueprint_virtnet.py
index 12441494b9..62937efbca 100644
--- a/lib/ansible/modules/network/aos/aos_blueprint_virtnet.py
+++ b/lib/ansible/modules/network/aos/aos_blueprint_virtnet.py
@@ -88,6 +88,7 @@ from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
+
def ensure_present(module, aos, blueprint, virtnet):
# if exist already return tru
@@ -104,7 +105,7 @@ def ensure_present(module, aos, blueprint, virtnet):
virtnet.create(module.params['content'])
except:
e = get_exception()
- module.fail_json(msg="unable to create virtual-network : %r" % e )
+ module.fail_json(msg="unable to create virtual-network : %r" % e)
module.exit_json(changed=True,
blueprint=blueprint.name,
@@ -121,7 +122,7 @@ def ensure_absent(module, aos, blueprint, virtnet):
virtnet.delete()
except:
e = get_exception()
- module.fail_json(msg="unable to delete virtual-network %s : %r" % (virtnet.name, e) )
+ module.fail_json(msg="unable to delete virtual-network %s : %r" % (virtnet.name, e))
module.exit_json(changed=True,
blueprint=blueprint.name)
@@ -130,6 +131,7 @@ def ensure_absent(module, aos, blueprint, virtnet):
module.exit_json(changed=False,
blueprint=blueprint.name)
+
def blueprint_virtnet(module):
margs = module.params
@@ -147,22 +149,22 @@ def blueprint_virtnet(module):
# --------------------------------------------------------------------
try:
blueprint = find_collection_item(aos.Blueprints,
- item_name=margs['blueprint'],
- item_id=margs['blueprint'])
+ item_name=margs['blueprint'],
+ item_id=margs['blueprint'])
except:
module.fail_json(msg="Unable to find the Blueprint based on name or ID, something went wrong")
if blueprint.exists is False:
module.fail_json(msg='Blueprint %s does not exist.\n'
- 'known blueprints are [%s]'%
- (margs['blueprint'],','.join(aos.Blueprints.names)))
+ 'known blueprints are [%s]' %
+ (margs['blueprint'], ','.join(aos.Blueprints.names)))
# --------------------------------------------------------------------
# Convert "content" to dict and extract name
# --------------------------------------------------------------------
if margs['content'] is not None:
- content = content_to_dict(module, margs['content'] )
+ content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
@@ -179,7 +181,7 @@ def blueprint_virtnet(module):
virtnet = blueprint.VirtualNetworks[item_name]
except:
module.fail_json(msg="Something went wrong while trying to find Virtual Network %s in blueprint %s"
- % ( item_name, blueprint.name ))
+ % (item_name, blueprint.name))
# --------------------------------------------------------------------
# Proceed based on State value
@@ -198,11 +200,11 @@ def main():
argument_spec=dict(
session=dict(required=True, type="dict"),
blueprint=dict(required=True),
- name=dict(required=False ),
+ name=dict(required=False),
content=dict(required=False, type="json"),
state=dict(choices=['present', 'absent'], default='present')
),
- mutually_exclusive = [('name', 'content')],
+ mutually_exclusive=[('name', 'content')],
required_one_of=[('name', 'content')],
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/aos/aos_device.py b/lib/ansible/modules/network/aos/aos_device.py
index a527fe6921..717083d774 100644
--- a/lib/ansible/modules/network/aos/aos_device.py
+++ b/lib/ansible/modules/network/aos/aos_device.py
@@ -132,7 +132,7 @@ def aos_device_normal(module, aos, dev):
value=dev.value)
else:
# Check if the device is online
- if dev.state in ('OOS-READY','IS-READY'):
+ if dev.state in ('OOS-READY', 'IS-READY'):
module.exit_json(changed=False,
name=dev.name,
id=dev.id,
@@ -140,6 +140,7 @@ def aos_device_normal(module, aos, dev):
else:
module.fail_json(msg="Device is in '%s' state" % dev.state)
+
def aos_device(module):
margs = module.params
@@ -161,8 +162,8 @@ def aos_device(module):
# Find Object if available based on ID or Name
#----------------------------------------------------
dev = find_collection_item(aos.Devices,
- item_name=item_name,
- item_id=item_id)
+ item_name=item_name,
+ item_id=item_id)
if dev.exists is False:
module.fail_json(msg="unknown device '%s'" % margs['name'])
@@ -189,6 +190,7 @@ def aos_device(module):
if margs['state'] == 'normal':
aos_device_normal(module, aos, dev)
+
def main():
module = AnsibleModule(
@@ -196,12 +198,12 @@ def main():
session=dict(required=True, type="dict"),
name=dict(required=False),
id=dict(required=False),
- state=dict( choices=['normal'],
- default='normal'),
- approve=dict( required=False, type='bool' ),
- location=dict( required=False, default='')
+ state=dict(choices=['normal'],
+ default='normal'),
+ approve=dict(required=False, type='bool'),
+ location=dict(required=False, default='')
),
- mutually_exclusive = [('name', 'id')],
+ mutually_exclusive=[('name', 'id')],
required_one_of=[('name', 'id')],
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/aos/aos_external_router.py b/lib/ansible/modules/network/aos/aos_external_router.py
index fc0abeac5e..e693811aab 100644
--- a/lib/ansible/modules/network/aos/aos_external_router.py
+++ b/lib/ansible/modules/network/aos/aos_external_router.py
@@ -155,16 +155,18 @@ from ansible.module_utils.network.aos.aos import get_aos_session, find_collectio
def create_new_ext_router(module, my_ext_router, name, loopback, asn):
# Create value
- datum = dict(display_name=name, address=loopback, asn=asn )
+ datum = dict(display_name=name, address=loopback, asn=asn)
my_ext_router.datum = datum
- ## Write to AOS
+ # Write to AOS
return my_ext_router.write()
#########################################################
# State Processing
#########################################################
+
+
def ext_router_absent(module, aos, my_ext_router):
margs = module.params
@@ -174,7 +176,7 @@ def ext_router_absent(module, aos, my_ext_router):
module.exit_json(changed=False,
name=margs['name'],
id=margs['id'],
- value={} )
+ value={})
# If not in check mode, delete External Router
if not module.check_mode:
@@ -185,10 +187,11 @@ def ext_router_absent(module, aos, my_ext_router):
except:
module.fail_json(msg="An error occurred, while trying to delete the External Router")
- module.exit_json( changed=True,
- name=my_ext_router.name,
- id=my_ext_router.id,
- value={} )
+ module.exit_json(changed=True,
+ name=my_ext_router.name,
+ id=my_ext_router.id,
+ value={})
+
def ext_router_present(module, aos, my_ext_router):
@@ -210,16 +213,15 @@ def ext_router_present(module, aos, my_ext_router):
my_ext_router,
margs['name'],
margs['loopback'],
- margs['asn'] )
+ margs['asn'])
my_ext_router = my_new_ext_router
except:
module.fail_json(msg="An error occurred while trying to create a new External Router")
-
- module.exit_json( changed=True,
- name=my_ext_router.name,
- id=my_ext_router.id,
- value=my_ext_router.value )
+ module.exit_json(changed=True,
+ name=my_ext_router.name,
+ id=my_ext_router.id,
+ value=my_ext_router.value)
# if external Router already exist, check if loopback and ASN are the same
# if same just return the object and report change false
@@ -249,14 +251,16 @@ def ext_router_present(module, aos, my_ext_router):
if int(asn) != int(my_ext_router.value['asn']):
module.fail_json(msg="my_ext_router already exist but ASN is different, currently not supported to update a module")
- module.exit_json( changed=False,
- name=my_ext_router.name,
- id=my_ext_router.id,
- value=my_ext_router.value )
+ module.exit_json(changed=False,
+ name=my_ext_router.name,
+ id=my_ext_router.id,
+ value=my_ext_router.value)
#########################################################
# Main Function
#########################################################
+
+
def ext_router(module):
margs = module.params
@@ -271,7 +275,7 @@ def ext_router(module):
if margs['content'] is not None:
- content = content_to_dict(module, margs['content'] )
+ content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
@@ -289,8 +293,8 @@ def ext_router(module):
#----------------------------------------------------
try:
my_ext_router = find_collection_item(aos.ExternalRouters,
- item_name=item_name,
- item_id=item_id)
+ item_name=item_name,
+ item_id=item_id)
except:
module.fail_json(msg="Unable to find the IP Pool based on name or ID, something went wrong")
@@ -305,20 +309,21 @@ def ext_router(module):
ext_router_present(module, aos, my_ext_router)
+
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
- name=dict(required=False ),
- id=dict(required=False ),
+ name=dict(required=False),
+ id=dict(required=False),
content=dict(required=False, type="json"),
- state=dict( required=False,
- choices=['present', 'absent'],
- default="present"),
+ state=dict(required=False,
+ choices=['present', 'absent'],
+ default="present"),
loopback=dict(required=False),
asn=dict(required=False)
),
- mutually_exclusive = [('name', 'id', 'content')],
+ mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
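# Editorial sketch, not part of the patch above: the aos_* modules combine
# mutually_exclusive=[('name', 'id', 'content')] with required_one_of over the
# same group, i.e. exactly one of the three selectors must be supplied. A minimal
# check with the same meaning (illustrative, not Ansible's implementation):
def check_exactly_one(params, group):
    given = [p for p in group if params.get(p) is not None]
    if not given:
        raise ValueError("one of the following is required: %s" % ', '.join(group))
    if len(given) > 1:
        raise ValueError("parameters are mutually exclusive: %s" % ', '.join(given))
    return given[0]


if __name__ == '__main__':
    print(check_exactly_one({'name': 'my-router', 'id': None, 'content': None},
                            ('name', 'id', 'content')))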
diff --git a/lib/ansible/modules/network/aos/aos_ip_pool.py b/lib/ansible/modules/network/aos/aos_ip_pool.py
index 8a8e35c681..41c7e16754 100644
--- a/lib/ansible/modules/network/aos/aos_ip_pool.py
+++ b/lib/ansible/modules/network/aos/aos_ip_pool.py
@@ -167,6 +167,7 @@ import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
+
def get_list_of_subnets(ip_pool):
subnets = []
@@ -175,6 +176,7 @@ def get_list_of_subnets(ip_pool):
return subnets
+
def create_new_ip_pool(ip_pool, name, subnets):
# Create value
@@ -184,12 +186,14 @@ def create_new_ip_pool(ip_pool, name, subnets):
ip_pool.datum = datum
- ## Write to AOS
+ # Write to AOS
return ip_pool.write()
#########################################################
# State Processing
#########################################################
+
+
def ip_pool_absent(module, aos, my_pool):
margs = module.params
@@ -198,7 +202,7 @@ def ip_pool_absent(module, aos, my_pool):
if my_pool.exists is False:
module.exit_json(changed=False, name=margs['name'], id='', value={})
- ## Check if object is currently in Use or Not
+ # Check if object is currently in Use or Not
# If in Use, return an error
if my_pool.value:
if my_pool.value['status'] != 'not_in_use':
@@ -213,10 +217,11 @@ def ip_pool_absent(module, aos, my_pool):
except:
module.fail_json(msg="An error occurred, while trying to delete the IP Pool")
- module.exit_json( changed=True,
- name=my_pool.name,
- id=my_pool.id,
- value={} )
+ module.exit_json(changed=True,
+ name=my_pool.name,
+ id=my_pool.id,
+ value={})
+
def ip_pool_present(module, aos, my_pool):
@@ -248,24 +253,26 @@ def ip_pool_present(module, aos, my_pool):
except:
module.fail_json(msg="An error occurred while trying to create a new IP Pool ")
- module.exit_json( changed=True,
- name=my_pool.name,
- id=my_pool.id,
- value=my_pool.value )
+ module.exit_json(changed=True,
+ name=my_pool.name,
+ id=my_pool.id,
+ value=my_pool.value)
# if pool already exist, check if list of network is the same
# if same just return the object and report change false
if set(get_list_of_subnets(my_pool)) == set(margs['subnets']):
- module.exit_json( changed=False,
- name=my_pool.name,
- id=my_pool.id,
- value=my_pool.value )
+ module.exit_json(changed=False,
+ name=my_pool.name,
+ id=my_pool.id,
+ value=my_pool.value)
else:
module.fail_json(msg="ip_pool already exist but value is different, currently not supported to update a module")
#########################################################
# Main Function
#########################################################
+
+
def ip_pool(module):
margs = module.params
@@ -280,7 +287,7 @@ def ip_pool(module):
if margs['content'] is not None:
- content = content_to_dict(module, margs['content'] )
+ content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
@@ -298,8 +305,8 @@ def ip_pool(module):
#----------------------------------------------------
try:
my_pool = find_collection_item(aos.IpPools,
- item_name=item_name,
- item_id=item_id)
+ item_name=item_name,
+ item_id=item_id)
except:
module.fail_json(msg="Unable to find the IP Pool based on name or ID, something went wrong")
@@ -314,19 +321,20 @@ def ip_pool(module):
ip_pool_present(module, aos, my_pool)
+
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
- name=dict(required=False ),
- id=dict(required=False ),
+ name=dict(required=False),
+ id=dict(required=False),
content=dict(required=False, type="json"),
- state=dict( required=False,
- choices=['present', 'absent'],
- default="present"),
+ state=dict(required=False,
+ choices=['present', 'absent'],
+ default="present"),
subnets=dict(required=False, type="list")
),
- mutually_exclusive = [('name', 'id', 'content')],
+ mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/aos/aos_logical_device.py b/lib/ansible/modules/network/aos/aos_logical_device.py
index abcf99a333..76412dfc33 100644
--- a/lib/ansible/modules/network/aos/aos_logical_device.py
+++ b/lib/ansible/modules/network/aos/aos_logical_device.py
@@ -134,6 +134,8 @@ from ansible.module_utils.network.aos.aos import get_aos_session, find_collectio
#########################################################
# State Processing
#########################################################
+
+
def logical_device_absent(module, aos, my_logical_dev):
margs = module.params
@@ -154,10 +156,11 @@ def logical_device_absent(module, aos, my_logical_dev):
except:
module.fail_json(msg="An error occurred, while trying to delete the Logical Device")
- module.exit_json( changed=True,
- name=my_logical_dev.name,
- id=my_logical_dev.id,
- value={} )
+ module.exit_json(changed=True,
+ name=my_logical_dev.name,
+ id=my_logical_dev.id,
+ value={})
+
def logical_device_present(module, aos, my_logical_dev):
@@ -174,14 +177,16 @@ def logical_device_present(module, aos, my_logical_dev):
if my_logical_dev.exists is False and 'content' not in margs.keys():
module.fail_json(msg="'content' is mandatory for module that don't exist currently")
- module.exit_json( changed=False,
- name=my_logical_dev.name,
- id=my_logical_dev.id,
- value=my_logical_dev.value )
+ module.exit_json(changed=False,
+ name=my_logical_dev.name,
+ id=my_logical_dev.id,
+ value=my_logical_dev.value)
#########################################################
# Main Function
#########################################################
+
+
def logical_device(module):
margs = module.params
@@ -196,7 +201,7 @@ def logical_device(module):
if margs['content'] is not None:
- content = content_to_dict(module, margs['content'] )
+ content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
@@ -227,18 +232,19 @@ def logical_device(module):
logical_device_present(module, aos, my_logical_dev)
+
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
- name=dict(required=False ),
- id=dict(required=False ),
+ name=dict(required=False),
+ id=dict(required=False),
content=dict(required=False, type="json"),
- state=dict( required=False,
- choices=['present', 'absent'],
- default="present")
+ state=dict(required=False,
+ choices=['present', 'absent'],
+ default="present")
),
- mutually_exclusive = [('name', 'id', 'content')],
+ mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/aos/aos_logical_device_map.py b/lib/ansible/modules/network/aos/aos_logical_device_map.py
index 474feb6871..44d089b784 100644
--- a/lib/ansible/modules/network/aos/aos_logical_device_map.py
+++ b/lib/ansible/modules/network/aos/aos_logical_device_map.py
@@ -154,6 +154,8 @@ from ansible.module_utils.network.aos.aos import get_aos_session, find_collectio
#########################################################
# State Processing
#########################################################
+
+
def logical_device_map_absent(module, aos, my_log_dev_map):
margs = module.params
@@ -172,10 +174,11 @@ def logical_device_map_absent(module, aos, my_log_dev_map):
except:
module.fail_json(msg="An error occurred, while trying to delete the Logical Device Map")
- module.exit_json( changed=True,
- name=my_log_dev_map.name,
- id=my_log_dev_map.id,
- value={} )
+ module.exit_json(changed=True,
+ name=my_log_dev_map.name,
+ id=my_log_dev_map.id,
+ value={})
+
def logical_device_map_present(module, aos, my_log_dev_map):
@@ -194,14 +197,16 @@ def logical_device_map_present(module, aos, my_log_dev_map):
if my_log_dev_map.exists is False and 'content' not in margs.keys():
module.fail_json(msg="'Content' is mandatory for module that don't exist currently")
- module.exit_json( changed=False,
- name=my_log_dev_map.name,
- id=my_log_dev_map.id,
- value=my_log_dev_map.value )
+ module.exit_json(changed=False,
+ name=my_log_dev_map.name,
+ id=my_log_dev_map.id,
+ value=my_log_dev_map.value)
#########################################################
# Main Function
#########################################################
+
+
def logical_device_map(module):
margs = module.params
@@ -216,7 +221,7 @@ def logical_device_map(module):
if margs['content'] is not None:
- content = content_to_dict(module, margs['content'] )
+ content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
@@ -234,8 +239,8 @@ def logical_device_map(module):
#----------------------------------------------------
try:
my_log_dev_map = find_collection_item(aos.LogicalDeviceMaps,
- item_name=item_name,
- item_id=item_id)
+ item_name=item_name,
+ item_id=item_id)
except:
module.fail_json(msg="Unable to find the Logical Device Map based on name or ID, something went wrong")
@@ -250,18 +255,19 @@ def logical_device_map(module):
logical_device_map_present(module, aos, my_log_dev_map)
+
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
- name=dict(required=False ),
- id=dict(required=False ),
+ name=dict(required=False),
+ id=dict(required=False),
content=dict(required=False, type="json"),
- state=dict( required=False,
- choices=['present', 'absent'],
- default="present")
+ state=dict(required=False,
+ choices=['present', 'absent'],
+ default="present")
),
- mutually_exclusive = [('name', 'id', 'content')],
+ mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/aos/aos_rack_type.py b/lib/ansible/modules/network/aos/aos_rack_type.py
index e6b4963f32..3be22595c1 100644
--- a/lib/ansible/modules/network/aos/aos_rack_type.py
+++ b/lib/ansible/modules/network/aos/aos_rack_type.py
@@ -133,6 +133,8 @@ from ansible.module_utils.network.aos.aos import get_aos_session, find_collectio
#########################################################
# State Processing
#########################################################
+
+
def rack_type_absent(module, aos, my_rack_type):
margs = module.params
@@ -151,10 +153,11 @@ def rack_type_absent(module, aos, my_rack_type):
except:
module.fail_json(msg="An error occurred, while trying to delete the Rack Type")
- module.exit_json( changed=True,
- name=my_rack_type.name,
- id=my_rack_type.id,
- value={} )
+ module.exit_json(changed=True,
+ name=my_rack_type.name,
+ id=my_rack_type.id,
+ value={})
+
def rack_type_present(module, aos, my_rack_type):
@@ -171,14 +174,16 @@ def rack_type_present(module, aos, my_rack_type):
if my_rack_type.exists is False and 'content' not in margs.keys():
module.fail_json(msg="'content' is mandatory for module that don't exist currently")
- module.exit_json( changed=False,
- name=my_rack_type.name,
- id=my_rack_type.id,
- value=my_rack_type.value )
+ module.exit_json(changed=False,
+ name=my_rack_type.name,
+ id=my_rack_type.id,
+ value=my_rack_type.value)
#########################################################
# Main Function
#########################################################
+
+
def rack_type(module):
margs = module.params
@@ -193,7 +198,7 @@ def rack_type(module):
if margs['content'] is not None:
- content = content_to_dict(module, margs['content'] )
+ content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
@@ -210,8 +215,8 @@ def rack_type(module):
# Find Object if available based on ID or Name
#----------------------------------------------------
my_rack_type = find_collection_item(aos.RackTypes,
- item_name=item_name,
- item_id=item_id)
+ item_name=item_name,
+ item_id=item_id)
#----------------------------------------------------
# Proceed based on State value
@@ -224,18 +229,19 @@ def rack_type(module):
rack_type_present(module, aos, my_rack_type)
+
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
- name=dict(required=False ),
- id=dict(required=False ),
+ name=dict(required=False),
+ id=dict(required=False),
content=dict(required=False, type="json"),
- state=dict( required=False,
- choices=['present', 'absent'],
- default="present")
+ state=dict(required=False,
+ choices=['present', 'absent'],
+ default="present")
),
- mutually_exclusive = [('name', 'id', 'content')],
+ mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/aos/aos_template.py b/lib/ansible/modules/network/aos/aos_template.py
index 26f19dc5f7..c87dee2ca1 100644
--- a/lib/ansible/modules/network/aos/aos_template.py
+++ b/lib/ansible/modules/network/aos/aos_template.py
@@ -143,6 +143,8 @@ from ansible.module_utils.network.aos.aos import get_aos_session, find_collectio
#########################################################
# State Processing
#########################################################
+
+
def template_absent(module, aos, my_template):
margs = module.params
@@ -163,10 +165,11 @@ def template_absent(module, aos, my_template):
except:
module.fail_json(msg="An error occurred, while trying to delete the Template")
- module.exit_json( changed=True,
- name=my_template.name,
- id=my_template.id,
- value={} )
+ module.exit_json(changed=True,
+ name=my_template.name,
+ id=my_template.id,
+ value={})
+
def template_present(module, aos, my_template):
@@ -186,10 +189,10 @@ def template_present(module, aos, my_template):
module.fail_json(msg="'content' is mandatory for module that don't exist currently")
# if module already exist, just return it
- module.exit_json( changed=False,
- name=my_template.name,
- id=my_template.id,
- value=my_template.value )
+ module.exit_json(changed=False,
+ name=my_template.name,
+ id=my_template.id,
+ value=my_template.value)
#########################################################
@@ -209,7 +212,7 @@ def aos_template(module):
if margs['content'] is not None:
- content = content_to_dict(module, margs['content'] )
+ content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
@@ -227,8 +230,8 @@ def aos_template(module):
#----------------------------------------------------
try:
my_template = find_collection_item(aos.DesignTemplates,
- item_name=item_name,
- item_id=item_id)
+ item_name=item_name,
+ item_id=item_id)
except:
module.fail_json(msg="Unable to find the IP Pool based on name or ID, something went wrong")
@@ -243,18 +246,19 @@ def aos_template(module):
template_present(module, aos, my_template)
+
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
- name=dict(required=False ),
- id=dict(required=False ),
+ name=dict(required=False),
+ id=dict(required=False),
content=dict(required=False, type="json"),
- state=dict( required=False,
- choices=['present', 'absent'],
- default="present")
+ state=dict(required=False,
+ choices=['present', 'absent'],
+ default="present")
),
- mutually_exclusive = [('name', 'id', 'content')],
+ mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
diff --git a/lib/ansible/modules/network/asa/asa_acl.py b/lib/ansible/modules/network/asa/asa_acl.py
index 82ded9985a..0f5c51d5c5 100644
--- a/lib/ansible/modules/network/asa/asa_acl.py
+++ b/lib/ansible/modules/network/asa/asa_acl.py
@@ -151,6 +151,7 @@ def get_acl_config(module, acl_name):
return NetworkConfig(indent=1, contents='\n'.join(filtered_config))
+
def parse_acl_name(module):
first_line = True
for line in module.params['lines']:
@@ -168,6 +169,7 @@ def parse_acl_name(module):
return acl_name
+
def main():
argument_spec = dict(
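
The asa_acl change is the E302/E305 rule: two blank lines before (and after) a top-level def, which is all the lone '+' lines add. A standalone sketch of the layout pycodestyle expects, illustrative only (the regex and the sample access-list line are made up):

import re                                    # stands in for the module's own imports


def parse_acl_name(line):
    # Hypothetical helper: pull the ACL name out of an 'access-list <name> ...' line.
    match = re.match(r'access-list (\S+)', line)
    return match.group(1) if match else None


def main():
    print(parse_acl_name('access-list OUTSIDE extended permit ip any any'))


if __name__ == '__main__':
    main()
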
diff --git a/lib/ansible/modules/network/asa/asa_command.py b/lib/ansible/modules/network/asa/asa_command.py
index 9a79f5137e..06b19c77f0 100644
--- a/lib/ansible/modules/network/asa/asa_command.py
+++ b/lib/ansible/modules/network/asa/asa_command.py
@@ -187,7 +187,6 @@ def main():
msg = 'One or more conditional statements have not be satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
-
result.update({
'changed': False,
'stdout': responses,
diff --git a/lib/ansible/modules/network/asa/asa_config.py b/lib/ansible/modules/network/asa/asa_config.py
index dca23567ca..4ba96aef4f 100644
--- a/lib/ansible/modules/network/asa/asa_config.py
+++ b/lib/ansible/modules/network/asa/asa_config.py
@@ -201,7 +201,6 @@ from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils._text import to_native
-
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
@@ -211,6 +210,7 @@ def get_candidate(module):
candidate.add(module.params['lines'], parents=parents)
return candidate
+
def run(module, result):
match = module.params['match']
replace = module.params['replace']
@@ -251,6 +251,7 @@ def run(module, result):
run_commands(module, 'write mem')
result['changed'] = True
+
def main():
""" main entry point for module execution
"""
@@ -293,7 +294,6 @@ def main():
config = None
-
if module.params['backup']:
result['__backup__'] = get_config(module)
diff --git a/lib/ansible/modules/network/bigswitch/bigmon_chain.py b/lib/ansible/modules/network/bigswitch/bigmon_chain.py
index 49157506f5..2c6f66525e 100755
--- a/lib/ansible/modules/network/bigswitch/bigmon_chain.py
+++ b/lib/ansible/modules/network/bigswitch/bigmon_chain.py
@@ -80,8 +80,8 @@ def chain(module):
controller = module.params['controller']
rest = Rest(module,
- {'content-type': 'application/json', 'Cookie': 'session_cookie='+access_token},
- 'https://'+controller+':8443/api/v1/data/controller/applications/bigchain')
+ {'content-type': 'application/json', 'Cookie': 'session_cookie=' + access_token},
+ 'https://' + controller + ':8443/api/v1/data/controller/applications/bigchain')
if None in (name, state, controller):
module.fail_json(msg='parameter `name` is missing')
@@ -115,6 +115,7 @@ def chain(module):
else:
module.fail_json(msg="error deleting chain '{}': {}".format(name, response.json['description']))
+
def main():
module = AnsibleModule(
argument_spec=dict(
diff --git a/lib/ansible/modules/network/bigswitch/bigmon_policy.py b/lib/ansible/modules/network/bigswitch/bigmon_policy.py
index 7b4f0303eb..7aefc644cf 100644
--- a/lib/ansible/modules/network/bigswitch/bigmon_policy.py
+++ b/lib/ansible/modules/network/bigswitch/bigmon_policy.py
@@ -113,8 +113,8 @@ def policy(module):
controller = module.params['controller']
rest = Rest(module,
- {'content-type': 'application/json', 'Cookie': 'session_cookie='+access_token},
- 'https://'+controller+':8443/api/v1/data/controller/applications/bigtap')
+ {'content-type': 'application/json', 'Cookie': 'session_cookie=' + access_token},
+ 'https://' + controller + ':8443/api/v1/data/controller/applications/bigtap')
if name is None:
module.fail_json(msg='parameter `name` is missing')
@@ -127,11 +127,11 @@ def policy(module):
matching = [policy for policy in response.json
if policy['name'] == name and
- policy['duration'] == duration and
- policy['delivery-packet-count'] == delivery_packet_count and
- policy['policy-description'] == policy_description and
- policy['action'] == action and
- policy['priority'] == priority]
+ policy['duration'] == duration and
+ policy['delivery-packet-count'] == delivery_packet_count and
+ policy['policy-description'] == policy_description and
+ policy['action'] == action and
+ policy['priority'] == priority]
if matching:
config_present = True
@@ -143,9 +143,9 @@ def policy(module):
module.exit_json(changed=False)
if state in ('present'):
- data={'name': name, 'action': action, 'policy-description': policy_description,
- 'priority': priority, 'duration': duration, 'start-time': start_time,
- 'delivery-packet-count': delivery_packet_count }
+ data = {'name': name, 'action': action, 'policy-description': policy_description,
+ 'priority': priority, 'duration': duration, 'start-time': start_time,
+ 'delivery-packet-count': delivery_packet_count}
response = rest.put('policy[name="%s"]' % name, data=data)
if response.status_code == 204:
@@ -160,6 +160,7 @@ def policy(module):
else:
module.fail_json(msg="error deleting policy '{}': {}".format(name, response.json['description']))
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -168,7 +169,7 @@ def main():
action=dict(choices=['forward', 'drop', 'capture', 'flow-gen'], default='forward'),
priority=dict(type='int', default=100),
duration=dict(type='int', default=0),
- start_time=dict(type='str', default=datetime.datetime.now().isoformat()+'+00:00'),
+ start_time=dict(type='str', default=datetime.datetime.now().isoformat() + '+00:00'),
delivery_packet_count=dict(type='int', default=0),
controller=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present'),
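
Both bigswitch hunks apply E225, which wants whitespace around binary operators, here the '+' used to build the Cookie header and the controller URL. A standalone sketch, illustrative only (the token and controller address are made up):

access_token = 'abc123'          # made-up session token
controller = '198.51.100.10'     # made-up controller address

headers = {'content-type': 'application/json',
           'Cookie': 'session_cookie=' + access_token}
url = 'https://' + controller + ':8443/api/v1/data/controller/applications/bigtap'
print(headers)
print(url)
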
diff --git a/lib/ansible/modules/network/cnos/cnos_bgp.py b/lib/ansible/modules/network/cnos/cnos_bgp.py
index f653724e36..eca0d3ec17 100644
--- a/lib/ansible/modules/network/cnos/cnos_bgp.py
+++ b/lib/ansible/modules/network/cnos/cnos_bgp.py
@@ -469,4 +469,4 @@ def main():
if __name__ == '__main__':
- main()
+ main()
diff --git a/lib/ansible/modules/network/cnos/cnos_command.py b/lib/ansible/modules/network/cnos/cnos_command.py
index 85646957f7..1ee342ce9d 100644
--- a/lib/ansible/modules/network/cnos/cnos_command.py
+++ b/lib/ansible/modules/network/cnos/cnos_command.py
@@ -163,4 +163,4 @@ def main():
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
- main()
+ main()
diff --git a/lib/ansible/modules/network/cnos/cnos_conditional_command.py b/lib/ansible/modules/network/cnos/cnos_conditional_command.py
index ad98f85218..8360ac7455 100644
--- a/lib/ansible/modules/network/cnos/cnos_conditional_command.py
+++ b/lib/ansible/modules/network/cnos/cnos_conditional_command.py
@@ -190,4 +190,4 @@ def main():
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
- main()
+ main()
diff --git a/lib/ansible/modules/network/cnos/cnos_image.py b/lib/ansible/modules/network/cnos/cnos_image.py
index d17f2ef9e3..77a4c7c5e6 100644
--- a/lib/ansible/modules/network/cnos/cnos_image.py
+++ b/lib/ansible/modules/network/cnos/cnos_image.py
@@ -228,4 +228,4 @@ def main():
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
- main()
+ main()
diff --git a/lib/ansible/modules/network/cnos/cnos_interface.py b/lib/ansible/modules/network/cnos/cnos_interface.py
index 8ee5fb619d..538f2e62f3 100644
--- a/lib/ansible/modules/network/cnos/cnos_interface.py
+++ b/lib/ansible/modules/network/cnos/cnos_interface.py
@@ -585,4 +585,4 @@ def main():
if __name__ == '__main__':
- main()
+ main()
diff --git a/lib/ansible/modules/network/cnos/cnos_portchannel.py b/lib/ansible/modules/network/cnos/cnos_portchannel.py
index 883602b254..af4a86b867 100644
--- a/lib/ansible/modules/network/cnos/cnos_portchannel.py
+++ b/lib/ansible/modules/network/cnos/cnos_portchannel.py
@@ -521,4 +521,4 @@ def main():
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
- main()
+ main()
diff --git a/lib/ansible/modules/network/cnos/cnos_rollback.py b/lib/ansible/modules/network/cnos/cnos_rollback.py
index 2ad78cd7f6..46fea95198 100644
--- a/lib/ansible/modules/network/cnos/cnos_rollback.py
+++ b/lib/ansible/modules/network/cnos/cnos_rollback.py
@@ -273,4 +273,4 @@ def main():
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
- main()
+ main()
diff --git a/lib/ansible/modules/network/cnos/cnos_vlan.py b/lib/ansible/modules/network/cnos/cnos_vlan.py
index 09ccaaf27a..7ef652ddfc 100644
--- a/lib/ansible/modules/network/cnos/cnos_vlan.py
+++ b/lib/ansible/modules/network/cnos/cnos_vlan.py
@@ -296,4 +296,4 @@ def main():
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
- main()
+ main()
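
The cnos hunks above look like no-ops because the change is whitespace-only: most likely the missing newline at end of file (W292) or stray trailing whitespace (W291), neither of which is visible in this listing. A small sketch of applying the W292 fix by hand, illustrative only (the temp-file demo is made up):

import os
import tempfile


def ensure_trailing_newline(path):
    # Append a newline if the file is non-empty and does not already end with one (W292).
    with open(path, 'rb+') as fh:
        fh.seek(0, os.SEEK_END)
        if fh.tell() > 0:
            fh.seek(-1, os.SEEK_END)
            if fh.read(1) != b'\n':
                fh.write(b'\n')


with tempfile.NamedTemporaryFile('wb', suffix='.py', delete=False) as tmp:
    tmp.write(b"if __name__ == '__main__':\n    main()")   # no trailing newline
ensure_trailing_newline(tmp.name)
with open(tmp.name, 'rb') as fh:
    print(fh.read().endswith(b'\n'))    # True
os.unlink(tmp.name)
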
diff --git a/lib/ansible/modules/network/cumulus/_cl_bond.py b/lib/ansible/modules/network/cumulus/_cl_bond.py
index f9050f47a5..d2db8306af 100644
--- a/lib/ansible/modules/network/cumulus/_cl_bond.py
+++ b/lib/ansible/modules/network/cumulus/_cl_bond.py
@@ -288,6 +288,7 @@ def conv_array_to_str(_value):
return ' '.join(_value)
return _value
+
def build_generic_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
diff --git a/lib/ansible/modules/network/cumulus/_cl_img_install.py b/lib/ansible/modules/network/cumulus/_cl_img_install.py
index 25072c90d2..2265847537 100644
--- a/lib/ansible/modules/network/cumulus/_cl_img_install.py
+++ b/lib/ansible/modules/network/cumulus/_cl_img_install.py
@@ -187,7 +187,6 @@ def check_fw_print_env(module, slot_num):
return m0.group(1)
-
def get_primary_slot_num(module):
cmd = None
if platform.machine() == 'ppc':
@@ -264,7 +263,7 @@ def check_sw_version(module):
if 'active' in slot:
_msg = "Version %s is installed in the active slot" \
% (_version)
- module.exit_json(changed=False, msg=_msg)
+ module.exit_json(changed=False, msg=_msg)
else:
_msg = "Version " + _version + \
" is installed in the alternate slot. "
diff --git a/lib/ansible/modules/network/cumulus/_cl_license.py b/lib/ansible/modules/network/cumulus/_cl_license.py
index a504865a23..175e79fb68 100644
--- a/lib/ansible/modules/network/cumulus/_cl_license.py
+++ b/lib/ansible/modules/network/cumulus/_cl_license.py
@@ -103,7 +103,7 @@ msg:
from ansible.module_utils.basic import AnsibleModule
-CL_LICENSE_PATH='/usr/cumulus/bin/cl-license'
+CL_LICENSE_PATH = '/usr/cumulus/bin/cl-license'
def install_license(module):
diff --git a/lib/ansible/modules/network/cumulus/_cl_ports.py b/lib/ansible/modules/network/cumulus/_cl_ports.py
index 21c3f3bf05..41e329ae29 100644
--- a/lib/ansible/modules/network/cumulus/_cl_ports.py
+++ b/lib/ansible/modules/network/cumulus/_cl_ports.py
@@ -99,7 +99,7 @@ def hash_existing_ports_conf(module):
except IOError as e:
_msg = "Failed to open %s: %s" % (PORTS_CONF, to_native(e))
module.fail_json(msg=_msg)
- return # for testing only should return on module.fail_json
+ return # for testing only should return on module.fail_json
for _line in existing_ports_conf:
_m0 = re.match(r'^(\d+)=(\w+)', _line)
@@ -129,7 +129,7 @@ def generate_new_ports_conf_hash(module):
port_setting
else:
int_range = map(int, port_range_str)
- portnum_range = range(int_range[0], int_range[1]+1)
+ portnum_range = range(int_range[0], int_range[1] + 1)
for i in portnum_range:
new_ports_conf_hash[i] = port_setting
module.new_ports_hash = new_ports_conf_hash
@@ -160,6 +160,7 @@ def make_copy_of_orig_ports_conf(module):
module.fail_json(msg=_msg)
return # for testing only
+
def write_to_ports_conf(module):
"""
use tempfile to first write out config in temp file
diff --git a/lib/ansible/modules/network/cumulus/nclu.py b/lib/ansible/modules/network/cumulus/nclu.py
index d14a825025..0f70a09dea 100644
--- a/lib/ansible/modules/network/cumulus/nclu.py
+++ b/lib/ansible/modules/network/cumulus/nclu.py
@@ -99,7 +99,7 @@ from ansible.module_utils.basic import AnsibleModule
def command_helper(module, command, errmsg=None):
"""Run a command, catch any nclu errors"""
- (_rc, output, _err) = module.run_command("/usr/bin/net %s"%command)
+ (_rc, output, _err) = module.run_command("/usr/bin/net %s" % command)
if _rc or 'ERROR' in output or 'ERROR' in _err:
module.fail_json(msg=errmsg or output)
return str(output)
@@ -141,7 +141,7 @@ def run_nclu(module, command_list, command_string, commit, atomic, abort, descri
# Run all of the net commands
output_lines = []
for line in commands:
- output_lines += [command_helper(module, line.strip(), "Failed on line %s"%line)]
+ output_lines += [command_helper(module, line.strip(), "Failed on line %s" % line)]
output = "\n".join(output_lines)
# If pending changes changed, report a change.
@@ -153,7 +153,7 @@ def run_nclu(module, command_list, command_string, commit, atomic, abort, descri
# Do the commit.
if do_commit:
- result = command_helper(module, "commit description '%s'"%description)
+ result = command_helper(module, "commit description '%s'" % description)
if "commit ignored" in result:
_changed = False
command_helper(module, "abort")
@@ -165,12 +165,12 @@ def run_nclu(module, command_list, command_string, commit, atomic, abort, descri
def main(testing=False):
module = AnsibleModule(argument_spec=dict(
- commands = dict(required=False, type='list'),
- template = dict(required=False, type='str'),
- description = dict(required=False, type='str', default="Ansible-originated commit"),
- abort = dict(required=False, type='bool', default=False),
- commit = dict(required=False, type='bool', default=False),
- atomic = dict(required=False, type='bool', default=False)),
+ commands=dict(required=False, type='list'),
+ template=dict(required=False, type='str'),
+ description=dict(required=False, type='str', default="Ansible-originated commit"),
+ abort=dict(required=False, type='bool', default=False),
+ commit=dict(required=False, type='bool', default=False),
+ atomic=dict(required=False, type='bool', default=False)),
mutually_exclusive=[('commands', 'template'),
('commit', 'atomic'),
('abort', 'atomic')]
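
The nclu.py hunks combine E251 (keyword arguments) with E228: '%' used for string formatting is still a binary operator, so it gets a space on each side. A standalone sketch, illustrative only (the command strings are made up):

command = 'pending'                 # made-up nclu subcommand
line = 'add vlan 100'               # made-up config line

# Flagged before the cleanup:  "/usr/bin/net %s"%command
# Clean:
print("/usr/bin/net %s" % command)
print("Failed on line %s" % line)
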
diff --git a/lib/ansible/modules/network/eos/eos_banner.py b/lib/ansible/modules/network/eos/eos_banner.py
index 0fbaa393af..d080fef06c 100644
--- a/lib/ansible/modules/network/eos/eos_banner.py
+++ b/lib/ansible/modules/network/eos/eos_banner.py
@@ -126,6 +126,7 @@ def map_obj_to_commands(updates, module):
return commands
+
def map_config_to_obj(module):
output = run_commands(module, ['show banner %s' % module.params['banner']])
obj = {'banner': module.params['banner'], 'state': 'absent'}
@@ -144,6 +145,7 @@ def map_config_to_obj(module):
obj['state'] = 'present'
return obj
+
def map_params_to_obj(module):
text = module.params['text']
if text:
@@ -155,6 +157,7 @@ def map_params_to_obj(module):
'state': module.params['state']
}
+
def main():
""" main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/eos/eos_command.py b/lib/ansible/modules/network/eos/eos_command.py
index 3fe90bd72a..7d4c4cd18b 100644
--- a/lib/ansible/modules/network/eos/eos_command.py
+++ b/lib/ansible/modules/network/eos/eos_command.py
@@ -146,6 +146,7 @@ from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
VALID_KEYS = ['command', 'output', 'prompt', 'response']
+
def to_lines(stdout):
lines = list()
for item in stdout:
@@ -154,6 +155,7 @@ def to_lines(stdout):
lines.append(item)
return lines
+
def parse_commands(module, warnings):
spec = dict(
command=dict(key=True),
@@ -176,12 +178,14 @@ def parse_commands(module, warnings):
return commands
+
def to_cli(obj):
cmd = obj['command']
if obj.get('output') == 'json':
cmd += ' | json'
return cmd
+
def main():
"""entry point for module execution
"""
diff --git a/lib/ansible/modules/network/eos/eos_config.py b/lib/ansible/modules/network/eos/eos_config.py
index f34334e104..88b3fbc4f0 100644
--- a/lib/ansible/modules/network/eos/eos_config.py
+++ b/lib/ansible/modules/network/eos/eos_config.py
@@ -403,7 +403,6 @@ def main():
result['changed'] = True
-
running_config = None
startup_config = None
diff --git a/lib/ansible/modules/network/eos/eos_eapi.py b/lib/ansible/modules/network/eos/eos_eapi.py
index f65f4403c6..22d4a8d1d8 100644
--- a/lib/ansible/modules/network/eos/eos_eapi.py
+++ b/lib/ansible/modules/network/eos/eos_eapi.py
@@ -187,6 +187,7 @@ from ansible.module_utils.network.eos.eos import run_commands, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
+
def check_transport(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
@@ -194,18 +195,22 @@ def check_transport(module):
if 'eapi' in (transport, provider_transport):
module.fail_json(msg='eos_eapi module is only supported over cli transport')
+
def validate_http_port(value, module):
if not 1 <= value <= 65535:
module.fail_json(msg='http_port must be between 1 and 65535')
+
def validate_https_port(value, module):
if not 1 <= value <= 65535:
module.fail_json(msg='http_port must be between 1 and 65535')
+
def validate_local_http_port(value, module):
if not 1 <= value <= 65535:
module.fail_json(msg='http_port must be between 1 and 65535')
+
def validate_vrf(value, module):
out = run_commands(module, ['show vrf'])
configured_vrfs = re.findall(r'^\s+(\w+)(?=\s)', out[0], re.M)
@@ -213,11 +218,12 @@ def validate_vrf(value, module):
if value not in configured_vrfs:
module.fail_json(msg='vrf `%s` is not configured on the system' % value)
+
def map_obj_to_commands(updates, module, warnings):
commands = list()
want, have = updates
- needs_update = lambda x: want.get(x) is not None and (want.get(x) != have.get(x))
+ def needs_update(x): return want.get(x) is not None and (want.get(x) != have.get(x))
def add(cmd):
if 'management api http-commands' not in commands:
@@ -266,7 +272,6 @@ def map_obj_to_commands(updates, module, warnings):
elif want['state'] == 'started':
add('no shutdown')
-
if needs_update('vrf'):
add('vrf %s' % want['vrf'])
# switching operational vrfs here
@@ -278,6 +283,7 @@ def map_obj_to_commands(updates, module, warnings):
return commands
+
def parse_state(data):
if data[0]['enabled']:
return 'started'
@@ -299,6 +305,7 @@ def map_config_to_obj(module):
'state': parse_state(out)
}
+
def map_params_to_obj(module):
obj = {
'http': module.params['http'],
@@ -320,6 +327,7 @@ def map_params_to_obj(module):
return obj
+
def verify_state(updates, module):
want, have = updates
@@ -348,6 +356,7 @@ def verify_state(updates, module):
if timeout == 0:
module.fail_json(msg='timeout expired before eapi running state changed')
+
def collect_facts(module, result):
out = run_commands(module, ['show management api http-commands | json'])
facts = dict(eos_eapi_urls=dict())
@@ -359,6 +368,7 @@ def collect_facts(module, result):
facts['eos_eapi_urls'][key].append(str(url).strip())
result['ansible_facts'] = facts
+
def main():
""" main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/eos/eos_facts.py b/lib/ansible/modules/network/eos/eos_facts.py
index e0961abad0..5dc7b20312 100644
--- a/lib/ansible/modules/network/eos/eos_facts.py
+++ b/lib/ansible/modules/network/eos/eos_facts.py
@@ -142,6 +142,7 @@ from ansible.module_utils.six import iteritems
from ansible.module_utils.network.eos.eos import run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
+
class FactsBase(object):
COMMANDS = frozenset()
@@ -190,6 +191,7 @@ class Default(FactsBase):
value = None
return dict(image=value)
+
class Hardware(FactsBase):
COMMANDS = [
@@ -218,6 +220,7 @@ class Hardware(FactsBase):
memtotal_mb=int(values['memTotal']) / 1024
)
+
class Config(FactsBase):
COMMANDS = ['show running-config']
@@ -312,6 +315,7 @@ FACT_SUBSETS = dict(
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
+
def main():
"""main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/eos/eos_system.py b/lib/ansible/modules/network/eos/eos_system.py
index d8449e5fc2..44fdd1725e 100644
--- a/lib/ansible/modules/network/eos/eos_system.py
+++ b/lib/ansible/modules/network/eos/eos_system.py
@@ -135,6 +135,7 @@ from ansible.module_utils.network.eos.eos import eos_argument_spec
_CONFIGURED_VRFS = None
+
def has_vrf(module, vrf):
global _CONFIGURED_VRFS
if _CONFIGURED_VRFS is not None:
@@ -144,11 +145,12 @@ def has_vrf(module, vrf):
_CONFIGURED_VRFS.append('default')
return vrf in _CONFIGURED_VRFS
+
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
- needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
+ def needs_update(x): return want.get(x) and (want.get(x) != have.get(x))
if state == 'absent':
if have['domain_name']:
@@ -205,9 +207,9 @@ def map_obj_to_commands(want, have, module):
module.fail_json(msg='vrf %s is not configured' % item['vrf'])
if item['vrf'] not in ('default', None):
values = (item['vrf'], item['server'])
- commands.append('no ip name-server vrf %s %s' % values)
+ commands.append('no ip name-server vrf %s %s' % values)
else:
- commands.append('no ip name-server %s' % item['server'])
+ commands.append('no ip name-server %s' % item['server'])
# handle name_servers items to be added
for item in want['name_servers']:
@@ -222,31 +224,36 @@ def map_obj_to_commands(want, have, module):
return commands
+
def parse_hostname(config):
match = re.search(r'^hostname (\S+)', config, re.M)
if match:
return match.group(1)
+
def parse_domain_name(config):
match = re.search(r'^ip domain-name (\S+)', config, re.M)
if match:
return match.group(1)
+
def parse_lookup_source(config):
objects = list()
regex = r'ip domain lookup (?:vrf (\S+) )*source-interface (\S+)'
for vrf, intf in re.findall(regex, config, re.M):
if len(vrf) == 0:
- vrf= None
+ vrf = None
objects.append({'interface': intf, 'vrf': vrf})
return objects
+
def parse_name_servers(config):
objects = list()
for vrf, addr in re.findall(r'ip name-server vrf (\S+) (\S+)', config, re.M):
objects.append({'server': addr, 'vrf': vrf})
return objects
+
def map_config_to_obj(module):
config = get_config(module)
return {
@@ -257,6 +264,7 @@ def map_config_to_obj(module):
'name_servers': parse_name_servers(config)
}
+
def map_params_to_obj(module):
obj = {
'hostname': module.params['hostname'],
@@ -282,6 +290,7 @@ def map_params_to_obj(module):
return obj
+
def main():
""" main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/eos/eos_user.py b/lib/ansible/modules/network/eos/eos_user.py
index 9bdec10838..be4b895000 100644
--- a/lib/ansible/modules/network/eos/eos_user.py
+++ b/lib/ansible/modules/network/eos/eos_user.py
@@ -174,8 +174,9 @@ def map_obj_to_commands(updates, module):
for update in updates:
want, have = update
- needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
- add = lambda x: commands.append('username %s %s' % (want['name'], x))
+ def needs_update(x): return want.get(x) and (want.get(x) != have.get(x))
+
+ def add(x): return commands.append('username %s %s' % (want['name'], x))
if want['state'] == 'absent':
commands.append('no username %s' % want['name'])
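
The eos_user hunk, like eos_eapi and eos_system above and ios_system further down, applies E731: a lambda bound to a name becomes a small def, which keeps the same closure over want/have but shows a real name in tracebacks. A standalone sketch, illustrative only (want and have are made-up stand-ins for the desired and current state):

want = {'role': 'network-admin', 'privilege': None}      # made-up desired state
have = {'role': 'network-operator', 'privilege': None}   # made-up device state

# Flagged before the cleanup (E731):
#   needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
# Clean:
def needs_update(key):
    return want.get(key) and (want.get(key) != have.get(key))

print(needs_update('role'))        # truthy: the role needs to be pushed
print(needs_update('privilege'))   # None/falsy: nothing to change
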
diff --git a/lib/ansible/modules/network/f5/bigip_monitor_tcp.py b/lib/ansible/modules/network/f5/bigip_monitor_tcp.py
index 2923c53428..b0abb48396 100644
--- a/lib/ansible/modules/network/f5/bigip_monitor_tcp.py
+++ b/lib/ansible/modules/network/f5/bigip_monitor_tcp.py
@@ -548,9 +548,9 @@ class Difference(object):
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
- raise F5ModuleError(
- "Parameter 'interval' must be less than 'timeout'."
- )
+ raise F5ModuleError(
+ "Parameter 'interval' must be less than 'timeout'."
+ )
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
diff --git a/lib/ansible/modules/network/f5/bigip_ucs.py b/lib/ansible/modules/network/f5/bigip_ucs.py
index ee6919b531..568ace4153 100644
--- a/lib/ansible/modules/network/f5/bigip_ucs.py
+++ b/lib/ansible/modules/network/f5/bigip_ucs.py
@@ -466,6 +466,7 @@ class V1Manager(BaseManager):
* No API to upload UCS files
"""
+
def create_on_device(self):
remote_path = "/var/local/ucs"
tpath_name = '/var/config/rest/downloads'
diff --git a/lib/ansible/modules/network/f5/bigip_wait.py b/lib/ansible/modules/network/f5/bigip_wait.py
index 8708ec27a6..3494fe3653 100644
--- a/lib/ansible/modules/network/f5/bigip_wait.py
+++ b/lib/ansible/modules/network/f5/bigip_wait.py
@@ -121,6 +121,7 @@ class AnsibleF5ClientStub(AnsibleF5Client):
the result will replace this work here.
"""
+
def __init__(self, argument_spec=None, supports_check_mode=False,
mutually_exclusive=None, required_together=None,
required_if=None, required_one_of=None, add_file_common_args=False,
diff --git a/lib/ansible/modules/network/fortios/fortios_config.py b/lib/ansible/modules/network/fortios/fortios_config.py
index 4761c6c0bc..367046740c 100644
--- a/lib/ansible/modules/network/fortios/fortios_config.py
+++ b/lib/ansible/modules/network/fortios/fortios_config.py
@@ -76,26 +76,26 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.fortios.fortios import fortios_argument_spec, fortios_required_if
from ansible.module_utils.network.fortios.fortios import backup
-#check for pyFG lib
+# check for pyFG lib
try:
from pyFG import FortiOS, FortiConfig
from pyFG.fortios import logger
from pyFG.exceptions import CommandExecutionException, FailedCommit, ForcedCommit
- HAS_PYFG=True
+ HAS_PYFG = True
except:
- HAS_PYFG=False
+ HAS_PYFG = False
# some blocks don't support update, so remove them
-NOT_UPDATABLE_CONFIG_OBJECTS=[
+NOT_UPDATABLE_CONFIG_OBJECTS = [
"vpn certificate local",
]
def main():
argument_spec = dict(
- src = dict(type='str', default=None),
- filter = dict(type='str', default=""),
+ src=dict(type='str', default=None),
+ filter=dict(type='str', default=""),
)
argument_spec.update(fortios_argument_spec)
@@ -114,20 +114,20 @@ def main():
if not HAS_PYFG:
module.fail_json(msg='Could not import the python library pyFG required by this module')
- #define device
- f = FortiOS( module.params['host'],
- username=module.params['username'],
- password=module.params['password'],
- timeout=module.params['timeout'],
- vdom=module.params['vdom'])
+ # define device
+ f = FortiOS(module.params['host'],
+ username=module.params['username'],
+ password=module.params['password'],
+ timeout=module.params['timeout'],
+ vdom=module.params['vdom'])
- #connect
+ # connect
try:
f.open()
except:
module.fail_json(msg='Error connecting device')
- #get config
+ # get config
try:
f.load_config(path=module.params['filter'])
result['running_config'] = f.running_config.to_text()
@@ -135,24 +135,23 @@ def main():
except:
module.fail_json(msg='Error reading running config')
- #backup config
+ # backup config
if module.params['backup']:
backup(module, f.running_config.to_text())
-
- #update config
+ # update config
if module.params['src'] is not None:
- #store config in str
+ # store config in str
try:
conf_str = module.params['src']
f.load_config(in_candidate=True, config_text=conf_str)
except:
module.fail_json(msg="Can't open configuration file, or configuration invalid")
- #get updates lines
+ # get updates lines
change_string = f.compare_config()
- #remove not updatable parts
+ # remove not updatable parts
c = FortiConfig()
c.parse_config_output(change_string)
@@ -165,7 +164,7 @@ def main():
result['change_string'] = change_string
result['changed'] = True
- #Commit if not check mode
+ # Commit if not check mode
if module.check_mode is False and change_string != "":
try:
f.commit(change_string)
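
The fortios_config hunks apply E265 (block comments need a space after '#') and E225 (spaces around '=' in ordinary assignments) to the pyFG import guard. A standalone sketch of the guarded-import pattern after cleanup, illustrative only (it catches ImportError explicitly, whereas the module itself keeps a bare except):

# check for pyFG lib
try:
    from pyFG import FortiOS            # optional third-party dependency
    HAS_PYFG = True
except ImportError:
    HAS_PYFG = False

print('pyFG available:', HAS_PYFG)
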
diff --git a/lib/ansible/modules/network/fortios/fortios_ipv4_policy.py b/lib/ansible/modules/network/fortios/fortios_ipv4_policy.py
index 0f641c300a..49f2f2fd6d 100644
--- a/lib/ansible/modules/network/fortios/fortios_ipv4_policy.py
+++ b/lib/ansible/modules/network/fortios/fortios_ipv4_policy.py
@@ -202,28 +202,28 @@ from ansible.module_utils.network.fortios.fortios import backup, AnsibleFortios
def main():
argument_spec = dict(
- comment = dict(type='str'),
- id = dict(type='int', required=True),
- src_intf = dict(type='list', default='any'),
- dst_intf = dict(type='list', default='any'),
- state = dict(choices=['present', 'absent'], default='present'),
- src_addr = dict(type='list'),
- dst_addr = dict(type='list'),
- src_addr_negate = dict(type='bool', default=False),
- dst_addr_negate = dict(type='bool', default=False),
- policy_action = dict(choices=['accept','deny'], aliases=['action']),
- service = dict(aliases=['services'], type='list'),
- service_negate = dict(type='bool', default=False),
- schedule = dict(type='str', default='always'),
- nat = dict(type='bool', default=False),
- fixedport = dict(type='bool', default=False),
- poolname = dict(type='str'),
- av_profile = dict(type='str'),
- webfilter_profile = dict(type='str'),
- ips_sensor = dict(type='str'),
- application_list = dict(type='str'),
- logtraffic = dict(choices=['disable','all','utm'], default='utm'),
- logtraffic_start = dict(type='bool', default=False),
+ comment=dict(type='str'),
+ id=dict(type='int', required=True),
+ src_intf=dict(type='list', default='any'),
+ dst_intf=dict(type='list', default='any'),
+ state=dict(choices=['present', 'absent'], default='present'),
+ src_addr=dict(type='list'),
+ dst_addr=dict(type='list'),
+ src_addr_negate=dict(type='bool', default=False),
+ dst_addr_negate=dict(type='bool', default=False),
+ policy_action=dict(choices=['accept', 'deny'], aliases=['action']),
+ service=dict(aliases=['services'], type='list'),
+ service_negate=dict(type='bool', default=False),
+ schedule=dict(type='str', default='always'),
+ nat=dict(type='bool', default=False),
+ fixedport=dict(type='bool', default=False),
+ poolname=dict(type='str'),
+ av_profile=dict(type='str'),
+ webfilter_profile=dict(type='str'),
+ ips_sensor=dict(type='str'),
+ application_list=dict(type='str'),
+ logtraffic=dict(choices=['disable', 'all', 'utm'], default='utm'),
+ logtraffic_start=dict(type='bool', default=False),
)
# merge global required_if & argument_spec from module_utils/fortios.py
@@ -236,7 +236,7 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
- required_if=fortios_required_if + ipv4_policy_required_if ,
+ required_if=fortios_required_if + ipv4_policy_required_if,
)
# init forti object
diff --git a/lib/ansible/modules/network/illumos/dladm_iptun.py b/lib/ansible/modules/network/illumos/dladm_iptun.py
index e116fbcf71..74b0e197bf 100644
--- a/lib/ansible/modules/network/illumos/dladm_iptun.py
+++ b/lib/ansible/modules/network/illumos/dladm_iptun.py
@@ -206,7 +206,6 @@ class IPTun(object):
rc=rc)
-
def main():
module = AnsibleModule(
argument_spec=dict(
diff --git a/lib/ansible/modules/network/illumos/dladm_linkprop.py b/lib/ansible/modules/network/illumos/dladm_linkprop.py
index 6c87315ee2..c4c0050b61 100644
--- a/lib/ansible/modules/network/illumos/dladm_linkprop.py
+++ b/lib/ansible/modules/network/illumos/dladm_linkprop.py
@@ -231,7 +231,6 @@ def main():
supports_check_mode=True
)
-
linkprop = LinkProp(module)
rc = None
diff --git a/lib/ansible/modules/network/ios/ios_banner.py b/lib/ansible/modules/network/ios/ios_banner.py
index 36882b5dea..f3401eaf4d 100644
--- a/lib/ansible/modules/network/ios/ios_banner.py
+++ b/lib/ansible/modules/network/ios/ios_banner.py
@@ -96,6 +96,7 @@ from ansible.module_utils.network.ios.ios import load_config, run_commands
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
import re
+
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
@@ -114,6 +115,7 @@ def map_obj_to_commands(updates, module):
return commands
+
def map_config_to_obj(module):
rc, out, err = exec_command(module, 'show banner %s' % module.params['banner'])
if rc == 0:
@@ -132,6 +134,7 @@ def map_config_to_obj(module):
obj['state'] = 'present'
return obj
+
def map_params_to_obj(module):
text = module.params['text']
if text:
@@ -143,6 +146,7 @@ def map_params_to_obj(module):
'state': module.params['state']
}
+
def main():
""" main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/ios/ios_command.py b/lib/ansible/modules/network/ios/ios_command.py
index 43941e5fef..a5c79d2194 100644
--- a/lib/ansible/modules/network/ios/ios_command.py
+++ b/lib/ansible/modules/network/ios/ios_command.py
@@ -140,12 +140,14 @@ from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
+
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
+
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
@@ -167,6 +169,7 @@ def parse_commands(module, warnings):
)
return commands
+
def main():
"""main entry point for module execution
"""
@@ -220,7 +223,6 @@ def main():
msg = 'One or more conditional statements have not be satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
-
result.update({
'changed': False,
'stdout': responses,
diff --git a/lib/ansible/modules/network/ios/ios_facts.py b/lib/ansible/modules/network/ios/ios_facts.py
index f89c415ac8..6645bb85d7 100644
--- a/lib/ansible/modules/network/ios/ios_facts.py
+++ b/lib/ansible/modules/network/ios/ios_facts.py
@@ -162,13 +162,13 @@ class FactsBase(object):
self.facts = dict()
self.responses = None
-
def populate(self):
self.responses = run_commands(self.module, self.COMMANDS, check_rc=False)
def run(self, cmd):
return run_commands(self.module, cmd, check_rc=False)
+
class Default(FactsBase):
COMMANDS = ['show version']
@@ -440,6 +440,7 @@ FACT_SUBSETS = dict(
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
+
def main():
"""main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/ios/ios_system.py b/lib/ansible/modules/network/ios/ios_system.py
index ac1e6778ad..e0baf07c4b 100644
--- a/lib/ansible/modules/network/ios/ios_system.py
+++ b/lib/ansible/modules/network/ios/ios_system.py
@@ -125,6 +125,7 @@ from ansible.module_utils.network.common.utils import ComplexList
_CONFIGURED_VRFS = None
+
def has_vrf(module, vrf):
global _CONFIGURED_VRFS
if _CONFIGURED_VRFS is not None:
@@ -133,20 +134,23 @@ def has_vrf(module, vrf):
_CONFIGURED_VRFS = re.findall(r'vrf definition (\S+)', config)
return vrf in _CONFIGURED_VRFS
+
def requires_vrf(module, vrf):
if not has_vrf(module, vrf):
module.fail_json(msg='vrf %s is not configured' % vrf)
+
def diff_list(want, have):
adds = [w for w in want if w not in have]
removes = [h for h in have if h not in want]
return (adds, removes)
+
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
- needs_update = lambda x: want.get(x) is not None and (want.get(x) != have.get(x))
+ def needs_update(x): return want.get(x) is not None and (want.get(x) != have.get(x))
if state == 'absent':
if have['hostname'] != 'Router':
@@ -226,7 +230,6 @@ def map_obj_to_commands(want, have, module):
else:
commands.append('ip domain list %s' % item['name'])
-
if want['name_servers']:
adds, removes = diff_list(want['name_servers'], have['name_servers'])
for item in removes:
@@ -243,10 +246,12 @@ def map_obj_to_commands(want, have, module):
return commands
+
def parse_hostname(config):
match = re.search(r'^hostname (\S+)', config, re.M)
return match.group(1)
+
def parse_domain_name(config):
match = re.findall(r'^ip domain name (?:vrf (\S+) )*(\S+)', config, re.M)
matches = list()
@@ -256,6 +261,7 @@ def parse_domain_name(config):
matches.append({'name': name, 'vrf': vrf})
return matches
+
def parse_domain_search(config):
match = re.findall(r'^ip domain list (?:vrf (\S+) )*(\S+)', config, re.M)
matches = list()
@@ -265,6 +271,7 @@ def parse_domain_search(config):
matches.append({'name': name, 'vrf': vrf})
return matches
+
def parse_name_servers(config):
match = re.findall(r'^ip name-server (?:vrf (\S+) )*(.*)', config, re.M)
matches = list()
@@ -275,11 +282,13 @@ def parse_name_servers(config):
matches.append({'server': server, 'vrf': vrf})
return matches
+
def parse_lookup_source(config):
match = re.search(r'ip domain lookup source-interface (\S+)', config, re.M)
if match:
return match.group(1)
+
def map_config_to_obj(module):
config = get_config(module)
return {
@@ -291,6 +300,7 @@ def map_config_to_obj(module):
'name_servers': parse_name_servers(config)
}
+
def map_params_to_obj(module):
obj = {
'hostname': module.params['hostname'],
@@ -324,6 +334,7 @@ def map_params_to_obj(module):
return obj
+
def main():
""" Main entry point for Ansible module execution
"""
diff --git a/lib/ansible/modules/network/ios/ios_vrf.py b/lib/ansible/modules/network/ios/ios_vrf.py
index 24c721efd3..e53b33b377 100644
--- a/lib/ansible/modules/network/ios/ios_vrf.py
+++ b/lib/ansible/modules/network/ios/ios_vrf.py
@@ -175,9 +175,10 @@ def add_command_to_vrf(name, cmd, commands):
])
commands.append(cmd)
+
def map_obj_to_commands(updates, module):
commands = list()
- state = module.params['state'] # FIXME NOT USED
+ state = module.params['state'] # FIXME NOT USED
for update in updates:
want, have = update
@@ -226,6 +227,7 @@ def map_obj_to_commands(updates, module):
return commands
+
def parse_description(configobj, name):
cfg = configobj['vrf definition %s' % name]
cfg = '\n'.join(cfg.children)
@@ -233,6 +235,7 @@ def parse_description(configobj, name):
if match:
return match.group(1)
+
def parse_rd(configobj, name):
cfg = configobj['vrf definition %s' % name]
cfg = '\n'.join(cfg.children)
@@ -240,6 +243,7 @@ def parse_rd(configobj, name):
if match:
return match.group(1)
+
def parse_interfaces(configobj, name):
vrf_cfg = 'vrf forwarding %s' % name
interfaces = list()
@@ -249,6 +253,7 @@ def parse_interfaces(configobj, name):
interfaces.append(intf.split(' ')[1])
return interfaces
+
def map_config_to_obj(module):
config = get_config(module)
configobj = NetworkConfig(indent=1, contents=config)
@@ -290,6 +295,7 @@ def get_param_value(key, item, module):
return value
+
def map_params_to_obj(module):
vrfs = module.params.get('vrfs')
if not vrfs:
@@ -320,6 +326,7 @@ def map_params_to_obj(module):
return objects
+
def update_objects(want, have):
updates = list()
for entry in want:
diff --git a/lib/ansible/modules/network/netscaler/netscaler_lb_vserver.py b/lib/ansible/modules/network/netscaler/netscaler_lb_vserver.py
index c5b26b5e69..ca78849dd9 100644
--- a/lib/ansible/modules/network/netscaler/netscaler_lb_vserver.py
+++ b/lib/ansible/modules/network/netscaler/netscaler_lb_vserver.py
@@ -1900,10 +1900,10 @@ def main():
module.fail_json(msg=msg, diff=lb_vserver_diff(client, module, lbvserver_proxy), **module_result)
if not service_bindings_identical(client, module):
- module.fail_json(msg='service bindings are not identical', **module_result)
+ module.fail_json(msg='service bindings are not identical', **module_result)
if not servicegroup_bindings_identical(client, module):
- module.fail_json(msg='servicegroup bindings are not identical', **module_result)
+ module.fail_json(msg='servicegroup bindings are not identical', **module_result)
if module.params['servicetype'] == 'SSL':
if not ssl_certkey_bindings_identical(client, module):
diff --git a/lib/ansible/modules/network/netvisor/pn_cluster.py b/lib/ansible/modules/network/netvisor/pn_cluster.py
index c50117e296..dada1d16dc 100644
--- a/lib/ansible/modules/network/netvisor/pn_cluster.py
+++ b/lib/ansible/modules/network/netvisor/pn_cluster.py
@@ -243,8 +243,8 @@ def main():
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
- state =dict(required=True, type='str',
- choices=['present', 'absent']),
+ state=dict(required=True, type='str',
+ choices=['present', 'absent']),
pn_name=dict(required=True, type='str'),
pn_cluster_node1=dict(type='str'),
pn_cluster_node2=dict(type='str'),
diff --git a/lib/ansible/modules/network/netvisor/pn_ospfarea.py b/lib/ansible/modules/network/netvisor/pn_ospfarea.py
index d0baf731c8..34739d451d 100644
--- a/lib/ansible/modules/network/netvisor/pn_ospfarea.py
+++ b/lib/ansible/modules/network/netvisor/pn_ospfarea.py
@@ -140,8 +140,8 @@ def main():
pn_cliusername=dict(required=True, type='str'),
pn_clipassword=dict(required=True, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str'),
- state =dict(required=True, type='str',
- choices=['present', 'absent', 'update']),
+ state=dict(required=True, type='str',
+ choices=['present', 'absent', 'update']),
pn_vrouter_name=dict(required=True, type='str'),
pn_ospf_area=dict(required=True, type='str'),
pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa',
diff --git a/lib/ansible/modules/network/netvisor/pn_vlag.py b/lib/ansible/modules/network/netvisor/pn_vlag.py
index a8f02f66b9..47962d702c 100644
--- a/lib/ansible/modules/network/netvisor/pn_vlag.py
+++ b/lib/ansible/modules/network/netvisor/pn_vlag.py
@@ -251,8 +251,8 @@ def main():
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
- state =dict(required=True, type='str',
- choices=['present', 'absent', 'update']),
+ state=dict(required=True, type='str',
+ choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_port=dict(type='str'),
pn_peer_port=dict(type='str'),
@@ -269,7 +269,7 @@ def main():
),
required_if=(
["state", "present", ["pn_name", "pn_port", "pn_peer_port",
- "pn_peer_switch"]],
+ "pn_peer_switch"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]]
)
diff --git a/lib/ansible/modules/network/netvisor/pn_vlan.py b/lib/ansible/modules/network/netvisor/pn_vlan.py
index 80fd8f7860..b455a5b814 100644
--- a/lib/ansible/modules/network/netvisor/pn_vlan.py
+++ b/lib/ansible/modules/network/netvisor/pn_vlan.py
@@ -175,7 +175,7 @@ def run_cli(module, cli):
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
- state= module.params['state']
+ state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
@@ -232,8 +232,8 @@ def main():
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
- state =dict(required=True, type='str',
- choices=['present', 'absent']),
+ state=dict(required=True, type='str',
+ choices=['present', 'absent']),
pn_vlanid=dict(required=True, type='int'),
pn_scope=dict(type='str', choices=['fabric', 'local']),
pn_description=dict(type='str'),
diff --git a/lib/ansible/modules/network/netvisor/pn_vrouter.py b/lib/ansible/modules/network/netvisor/pn_vrouter.py
index 9d3a968bd7..b5271b2c64 100644
--- a/lib/ansible/modules/network/netvisor/pn_vrouter.py
+++ b/lib/ansible/modules/network/netvisor/pn_vrouter.py
@@ -289,8 +289,8 @@ def main():
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
- state =dict(required=True, type='str',
- choices=['present', 'absent', 'update']),
+ state=dict(required=True, type='str',
+ choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_vnet=dict(type='str'),
pn_service_type=dict(type='str', choices=['dedicated', 'shared']),
diff --git a/lib/ansible/modules/network/netvisor/pn_vrouterbgp.py b/lib/ansible/modules/network/netvisor/pn_vrouterbgp.py
index 17d511c32d..ef3fe495d8 100644
--- a/lib/ansible/modules/network/netvisor/pn_vrouterbgp.py
+++ b/lib/ansible/modules/network/netvisor/pn_vrouterbgp.py
@@ -333,7 +333,7 @@ def main():
)
# Accessing the arguments
- state= module.params['state']
+ state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
neighbor = module.params['pn_neighbor']
remote_as = module.params['pn_remote_as']
diff --git a/lib/ansible/modules/network/netvisor/pn_vrouterif.py b/lib/ansible/modules/network/netvisor/pn_vrouterif.py
index 4b55c37554..7f17778186 100644
--- a/lib/ansible/modules/network/netvisor/pn_vrouterif.py
+++ b/lib/ansible/modules/network/netvisor/pn_vrouterif.py
@@ -347,8 +347,8 @@ def main():
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
- state =dict(required=True, type='str',
- choices=['present', 'absent']),
+ state=dict(required=True, type='str',
+ choices=['present', 'absent']),
pn_vrouter_name=dict(required=True, type='str'),
pn_vlan=dict(type='int'),
pn_interface_ip=dict(required=True, type='str'),
diff --git a/lib/ansible/modules/network/netvisor/pn_vrouterlbif.py b/lib/ansible/modules/network/netvisor/pn_vrouterlbif.py
index 82594b44d8..94c55cb54d 100644
--- a/lib/ansible/modules/network/netvisor/pn_vrouterlbif.py
+++ b/lib/ansible/modules/network/netvisor/pn_vrouterlbif.py
@@ -240,8 +240,8 @@ def main():
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
- state =dict(required=True, type='str',
- choices=['present', 'absent']),
+ state=dict(required=True, type='str',
+ choices=['present', 'absent']),
pn_vrouter_name=dict(required=True, type='str'),
pn_interface_ip=dict(type='str'),
pn_index=dict(type='int')
diff --git a/lib/ansible/modules/network/nxos/_nxos_mtu.py b/lib/ansible/modules/network/nxos/_nxos_mtu.py
index 50fbdcb283..f2a52127a6 100644
--- a/lib/ansible/modules/network/nxos/_nxos_mtu.py
+++ b/lib/ansible/modules/network/nxos/_nxos_mtu.py
@@ -125,6 +125,7 @@ from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
+
def execute_show_command(command, module):
if 'show run' not in command:
output = 'json'
diff --git a/lib/ansible/modules/network/nxos/nxos_aaa_server_host.py b/lib/ansible/modules/network/nxos/nxos_aaa_server_host.py
index f12a6f91aa..55dceed384 100644
--- a/lib/ansible/modules/network/nxos/nxos_aaa_server_host.py
+++ b/lib/ansible/modules/network/nxos/nxos_aaa_server_host.py
@@ -276,12 +276,11 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
-
server_type = module.params['server_type']
address = module.params['address']
key = module.params['key']
@@ -311,7 +310,6 @@ def main():
module.fail_json(msg='auth_port and acct_port can only be used'
'when server_type=radius')
-
existing = get_aaa_host_info(module, server_type, address)
end_state = existing
diff --git a/lib/ansible/modules/network/nxos/nxos_config.py b/lib/ansible/modules/network/nxos/nxos_config.py
index a8a9e9ff7f..0232819a49 100644
--- a/lib/ansible/modules/network/nxos/nxos_config.py
+++ b/lib/ansible/modules/network/nxos/nxos_config.py
@@ -300,9 +300,9 @@ def get_candidate(module):
def execute_show_commands(module, commands, output='text'):
cmds = []
for command in to_list(commands):
- cmd = { 'command': command,
- 'output': output,
- }
+ cmd = {'command': command,
+ 'output': output,
+ }
cmds.append(cmd)
body = run_commands(module, cmds)
return body
@@ -460,7 +460,6 @@ def main():
'diff': {'before': str(base_config), 'after': str(running_config)}
})
-
module.exit_json(**result)
diff --git a/lib/ansible/modules/network/nxos/nxos_gir.py b/lib/ansible/modules/network/nxos/nxos_gir.py
index 549d83e73e..373ee56c60 100644
--- a/lib/ansible/modules/network/nxos/nxos_gir.py
+++ b/lib/ansible/modules/network/nxos/nxos_gir.py
@@ -168,6 +168,7 @@ from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
+
def execute_show_command(command, module, command_type='cli_show_ascii'):
cmds = [command]
provider = module.params['provider']
@@ -260,40 +261,39 @@ def main():
argument_spec = dict(
system_mode_maintenance=dict(required=False, type='bool'),
system_mode_maintenance_dont_generate_profile=dict(required=False,
- type='bool'),
+ type='bool'),
system_mode_maintenance_timeout=dict(required=False, type='str'),
system_mode_maintenance_shutdown=dict(required=False, type='bool'),
system_mode_maintenance_on_reload_reset_reason=dict(required=False,
- choices=['hw_error','svc_failure','kern_failure',
- 'wdog_timeout','fatal_error','lc_failure',
- 'match_any','manual_reload']),
+ choices=['hw_error', 'svc_failure', 'kern_failure',
+ 'wdog_timeout', 'fatal_error', 'lc_failure',
+ 'match_any', 'manual_reload']),
state=dict(choices=['absent', 'present', 'default'],
- default='present', required=False)
+ default='present', required=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=[[
- 'system_mode_maintenance',
- 'system_mode_maintenance_dont_generate_profile',
- 'system_mode_maintenance_timeout',
- 'system_mode_maintenance_shutdown',
- 'system_mode_maintenance_on_reload_reset_reason'
- ]],
- required_one_of=[[
- 'system_mode_maintenance',
- 'system_mode_maintenance_dont_generate_profile',
- 'system_mode_maintenance_timeout',
- 'system_mode_maintenance_shutdown',
- 'system_mode_maintenance_on_reload_reset_reason'
- ]],
- supports_check_mode=True)
+ mutually_exclusive=[[
+ 'system_mode_maintenance',
+ 'system_mode_maintenance_dont_generate_profile',
+ 'system_mode_maintenance_timeout',
+ 'system_mode_maintenance_shutdown',
+ 'system_mode_maintenance_on_reload_reset_reason'
+ ]],
+ required_one_of=[[
+ 'system_mode_maintenance',
+ 'system_mode_maintenance_dont_generate_profile',
+ 'system_mode_maintenance_timeout',
+ 'system_mode_maintenance_shutdown',
+ 'system_mode_maintenance_on_reload_reset_reason'
+ ]],
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
-
state = module.params['state']
mode = get_system_mode(module)
commands = get_commands(module, state, mode)
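
Editor's note (illustrative, not part of the patch): the nxos_gir hunk above only re-indents the AnsibleModule() call so that the mutually_exclusive, required_one_of and supports_check_mode keyword arguments line up under the opening parenthesis, as pycodestyle's continuation-line checks (E127/E128) expect. A minimal sketch of the same alignment convention, using a hypothetical connect() helper rather than Ansible's API:

# Illustrative only -- a made-up call showing continuation lines aligned
# with the first argument, the style the hunk above enforces.
def connect(host, port=22, timeout=30, retries=3, keepalive=True):
    return {'host': host, 'port': port, 'timeout': timeout,
            'retries': retries, 'keepalive': keepalive}

session = connect('switch01.example.com',
                  port=830,
                  timeout=60,
                  retries=5,
                  keepalive=False)
print(session)
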
diff --git a/lib/ansible/modules/network/nxos/nxos_gir_profile_management.py b/lib/ansible/modules/network/nxos/nxos_gir_profile_management.py
index a1058cfc79..d5d02fb764 100644
--- a/lib/ansible/modules/network/nxos/nxos_gir_profile_management.py
+++ b/lib/ansible/modules/network/nxos/nxos_gir_profile_management.py
@@ -176,7 +176,7 @@ def main():
commands=dict(required=False, type='list'),
mode=dict(required=True, choices=['maintenance', 'normal']),
state=dict(choices=['absent', 'present'],
- default='present'),
+ default='present'),
include_defaults=dict(default=False),
config=dict()
)
@@ -184,12 +184,11 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
-
state = module.params['state']
commands = module.params['commands'] or []
diff --git a/lib/ansible/modules/network/nxos/nxos_igmp.py b/lib/ansible/modules/network/nxos/nxos_igmp.py
index e0c89e828b..6812eba1a5 100644
--- a/lib/ansible/modules/network/nxos/nxos_igmp.py
+++ b/lib/ansible/modules/network/nxos/nxos_igmp.py
@@ -95,6 +95,7 @@ from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
+
def get_current(module):
output = run_commands(module, {'command': 'show running-config', 'output': 'text'})
return {
@@ -126,12 +127,11 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
-
current = get_current(module)
desired = get_desired(module)
diff --git a/lib/ansible/modules/network/nxos/nxos_igmp_interface.py b/lib/ansible/modules/network/nxos/nxos_igmp_interface.py
index ec32eec561..fe01bac3d0 100644
--- a/lib/ansible/modules/network/nxos/nxos_igmp_interface.py
+++ b/lib/ansible/modules/network/nxos/nxos_igmp_interface.py
@@ -242,6 +242,7 @@ from ansible.module_utils.basic import AnsibleModule
import re
+
def execute_show_command(command, module, command_type='cli_show'):
if command_type == 'cli_show_ascii':
cmds = [{
@@ -501,11 +502,11 @@ def config_remove_oif(existing, existing_oif_prefix_source):
if each.get('prefix') and each.get('source'):
command = 'no ip igmp static-oif {0} source {1} '.format(
each.get('prefix'), each.get('source')
- )
+ )
elif each.get('prefix'):
command = 'no ip igmp static-oif {0}'.format(
each.get('prefix')
- )
+ )
if command:
commands.append(command)
command = None
@@ -533,7 +534,7 @@ def main():
oif_source=dict(required=False, type='str'),
restart=dict(type='bool', default=False),
state=dict(choices=['present', 'absent', 'default'],
- default='present'),
+ default='present'),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
@@ -542,12 +543,11 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
-
state = module.params['state']
interface = module.params['interface']
oif_prefix = module.params['oif_prefix']
@@ -607,7 +607,7 @@ def main():
changed = False
commands = []
proposed = dict((k, v) for k, v in module.params.items()
- if v is not None and k in args)
+ if v is not None and k in args)
CANNOT_ABSENT = ['version', 'startup_query_interval',
'startup_query_count', 'robustness', 'querier_timeout',
diff --git a/lib/ansible/modules/network/nxos/nxos_igmp_snooping.py b/lib/ansible/modules/network/nxos/nxos_igmp_snooping.py
index 9ebe7ea1e2..6d84d40fb8 100644
--- a/lib/ansible/modules/network/nxos/nxos_igmp_snooping.py
+++ b/lib/ansible/modules/network/nxos/nxos_igmp_snooping.py
@@ -274,7 +274,7 @@ def main():
if state == 'present':
delta = dict(
set(proposed.items()).difference(existing.items())
- )
+ )
if delta:
command = config_igmp_snooping(delta, existing)
if command:
@@ -283,7 +283,7 @@ def main():
proposed = get_igmp_snooping_defaults()
delta = dict(
set(proposed.items()).difference(existing.items())
- )
+ )
if delta:
command = config_igmp_snooping(delta, existing, default=True)
if command:
diff --git a/lib/ansible/modules/network/nxos/nxos_ip_interface.py b/lib/ansible/modules/network/nxos/nxos_ip_interface.py
index e4da09acf4..74c68c6260 100644
--- a/lib/ansible/modules/network/nxos/nxos_ip_interface.py
+++ b/lib/ansible/modules/network/nxos/nxos_ip_interface.py
@@ -343,13 +343,13 @@ def parse_interface_data(body):
splitted_body = body.split('\n')
for index in range(0, len(splitted_body) - 1):
- if "Encapsulation 802.1Q" in splitted_body[index]:
- regex = r'(.+?ID\s(?P<dot1q>\d+).*)?'
- match = re.match(regex, splitted_body[index])
- if match:
- match_dict = match.groupdict()
- if match_dict['dot1q'] is not None:
- return int(match_dict['dot1q'])
+ if "Encapsulation 802.1Q" in splitted_body[index]:
+ regex = r'(.+?ID\s(?P<dot1q>\d+).*)?'
+ match = re.match(regex, splitted_body[index])
+ if match:
+ match_dict = match.groupdict()
+ if match_dict['dot1q'] is not None:
+ return int(match_dict['dot1q'])
return 0
diff --git a/lib/ansible/modules/network/nxos/nxos_logging.py b/lib/ansible/modules/network/nxos/nxos_logging.py
index f0821bb739..c198b21dbc 100644
--- a/lib/ansible/modules/network/nxos/nxos_logging.py
+++ b/lib/ansible/modules/network/nxos/nxos_logging.py
@@ -146,7 +146,7 @@ def map_obj_to_commands(updates, module):
pass
if w['facility'] is not None:
- commands.append('logging level {} {}'.format(w['facility'], w['facility_level']))
+ commands.append('logging level {} {}'.format(w['facility'], w['facility_level']))
return commands
diff --git a/lib/ansible/modules/network/nxos/nxos_ntp_auth.py b/lib/ansible/modules/network/nxos/nxos_ntp_auth.py
index b8cce10b34..23b55ee2c4 100644
--- a/lib/ansible/modules/network/nxos/nxos_ntp_auth.py
+++ b/lib/ansible/modules/network/nxos/nxos_ntp_auth.py
@@ -198,7 +198,7 @@ def get_ntp_auth_info(key_id, module):
def auth_type_to_num(auth_type):
- if auth_type == 'encrypt' :
+ if auth_type == 'encrypt':
return '7'
else:
return '0'
@@ -258,12 +258,11 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
-
key_id = module.params['key_id']
md5string = module.params['md5string']
auth_type = module.params['auth_type']
diff --git a/lib/ansible/modules/network/nxos/nxos_ntp_options.py b/lib/ansible/modules/network/nxos/nxos_ntp_options.py
index d412413898..afa535f07d 100644
--- a/lib/ansible/modules/network/nxos/nxos_ntp_options.py
+++ b/lib/ansible/modules/network/nxos/nxos_ntp_options.py
@@ -95,7 +95,7 @@ def get_current(module):
cmd = ('show running-config', 'show ntp logging')
output = run_commands(module, ({'command': cmd[0], 'output': 'text'},
- {'command': cmd[1], 'output': 'text'}))
+ {'command': cmd[1], 'output': 'text'}))
match = re.search(r"^ntp master(?: (\d+))", output[0], re.M)
if match:
@@ -167,7 +167,6 @@ def main():
else:
commands.append('no ntp logging')
-
result['commands'] = commands
result['updates'] = commands
diff --git a/lib/ansible/modules/network/nxos/nxos_nxapi.py b/lib/ansible/modules/network/nxos/nxos_nxapi.py
index 1e2cfcc01f..3f75808c88 100644
--- a/lib/ansible/modules/network/nxos/nxos_nxapi.py
+++ b/lib/ansible/modules/network/nxos/nxos_nxapi.py
@@ -130,6 +130,7 @@ from ansible.module_utils.network.nxos.nxos import check_args as nxos_check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
+
def check_args(module, warnings):
provider = module.params['provider']
if provider['transport'] == 'nxapi':
@@ -159,10 +160,11 @@ def check_args(module, warnings):
return warnings
+
def map_obj_to_commands(want, have, module):
commands = list()
- needs_update = lambda x: want.get(x) is not None and (want.get(x) != have.get(x))
+ def needs_update(x): return want.get(x) is not None and (want.get(x) != have.get(x))
if needs_update('state'):
if want['state'] == 'absent':
@@ -191,6 +193,7 @@ def map_obj_to_commands(want, have, module):
return commands
+
def parse_http(data):
http_res = [r'nxapi http port (\d+)']
http_port = None
@@ -203,6 +206,7 @@ def parse_http(data):
return {'http': http_port is not None, 'http_port': http_port}
+
def parse_https(data):
https_res = [r'nxapi https port (\d+)']
https_port = None
@@ -215,6 +219,7 @@ def parse_https(data):
return {'https': https_port is not None, 'https_port': https_port}
+
def parse_sandbox(data):
sandbox = [item for item in data.split('\n') if re.search(r'.*sandbox.*', item)]
value = False
@@ -222,6 +227,7 @@ def parse_sandbox(data):
value = True
return {'sandbox': value}
+
def map_config_to_obj(module):
out = run_commands(module, ['show run all | inc nxapi'], check_rc=False)[0]
match = re.search(r'no feature nxapi', out, re.M)
@@ -240,6 +246,7 @@ def map_config_to_obj(module):
return obj
+
def map_params_to_obj(module):
obj = {
'http': module.params['http'],
@@ -252,6 +259,7 @@ def map_params_to_obj(module):
return obj
+
def main():
""" main entry point for module execution
"""
@@ -275,8 +283,6 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
-
-
warnings = list()
check_args(module, warnings)
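
Editor's note (illustrative, not part of the patch): the nxos_nxapi change above replaces a lambda bound to a name with a small def, which is what pycodestyle E731 asks for. A self-contained sketch of the same needs_update pattern, with made-up want/have dictionaries:

# Illustrative only; the dictionaries are invented for the example.
want = {'http': True, 'http_port': 8080, 'https': None}
have = {'http': True, 'http_port': 80, 'https': False}

# Before: needs_update = lambda x: want.get(x) is not None and (want.get(x) != have.get(x))
# After (E731): a named def carries the same logic and gives tracebacks a real __name__.
def needs_update(key):
    return want.get(key) is not None and (want.get(key) != have.get(key))

print([k for k in want if needs_update(k)])   # -> ['http_port']
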
diff --git a/lib/ansible/modules/network/nxos/nxos_overlay_global.py b/lib/ansible/modules/network/nxos/nxos_overlay_global.py
index 32e8415d6c..abd77a5cba 100644
--- a/lib/ansible/modules/network/nxos/nxos_overlay_global.py
+++ b/lib/ansible/modules/network/nxos/nxos_overlay_global.py
@@ -134,7 +134,7 @@ def normalize_mac(proposed_mac, module):
else:
octect_len = len(octect)
padding = 4 - octect_len
- splitted_mac.append(octect.zfill(padding+1))
+ splitted_mac.append(octect.zfill(padding + 1))
elif ':' in proposed_mac:
splitted_mac = proposed_mac.split(':')
@@ -150,7 +150,7 @@ def normalize_mac(proposed_mac, module):
module.fail_json(msg='Invalid MAC address format', proposed_mac=proposed_mac)
joined_mac = ''.join(splitted_mac)
- mac = [joined_mac[i:i+4] for i in range(0, len(joined_mac), 4)]
+ mac = [joined_mac[i:i + 4] for i in range(0, len(joined_mac), 4)]
return '.'.join(mac).upper()
@@ -172,7 +172,7 @@ def main():
existing = get_existing(module, args)
proposed = dict((k, v) for k, v in module.params.items()
- if v is not None and k in args)
+ if v is not None and k in args)
candidate = CustomNetworkConfig(indent=3)
get_commands(module, existing, proposed, candidate)
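
Editor's note (illustrative, not part of the patch): besides the operator-spacing fixes (E225/E226), the slice touched in normalize_mac() above is the step that turns a 12-digit hex string into NX-OS dotted notation. A standalone illustration of just that chunk-and-join step, on a made-up address:

# Illustrative only -- reproduces the final slicing step of normalize_mac()
# from the hunk above, with an invented MAC value.
joined_mac = '002304eebe01'
mac = [joined_mac[i:i + 4] for i in range(0, len(joined_mac), 4)]
print('.'.join(mac).upper())   # -> 0023.04EE.BE01
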
diff --git a/lib/ansible/modules/network/nxos/nxos_ping.py b/lib/ansible/modules/network/nxos/nxos_ping.py
index ee24a5fba4..9a9572088d 100644
--- a/lib/ansible/modules/network/nxos/nxos_ping.py
+++ b/lib/ansible/modules/network/nxos/nxos_ping.py
@@ -110,7 +110,7 @@ from ansible.module_utils.basic import AnsibleModule
def get_summary(results_list, reference_point):
- summary_string = results_list[reference_point+1]
+ summary_string = results_list[reference_point + 1]
summary_list = summary_string.split(',')
summary = dict(
@@ -119,7 +119,7 @@ def get_summary(results_list, reference_point):
packet_loss=summary_list[2].split('packet')[0].strip(),
)
- if 'bytes from' not in results_list[reference_point-2]:
+ if 'bytes from' not in results_list[reference_point - 2]:
ping_pass = False
else:
ping_pass = True
@@ -168,7 +168,7 @@ def get_ping_results(command, module):
splitted_ping = ping.split('\n')
reference_point = get_statistics_summary_line(splitted_ping)
summary, ping_pass = get_summary(splitted_ping, reference_point)
- rtt = get_rtt(splitted_ping, summary['packet_loss'], reference_point+2)
+ rtt = get_rtt(splitted_ping, summary['packet_loss'], reference_point + 2)
return (summary, rtt, ping_pass)
diff --git a/lib/ansible/modules/network/nxos/nxos_smu.py b/lib/ansible/modules/network/nxos/nxos_smu.py
index 427819377d..aeb1bc9281 100644
--- a/lib/ansible/modules/network/nxos/nxos_smu.py
+++ b/lib/ansible/modules/network/nxos/nxos_smu.py
@@ -131,13 +131,12 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = {'changed': False, 'commands': [], 'warnings': warnings}
-
pkg = module.params['pkg']
file_system = module.params['file_system']
remote_exists = remote_file_exists(module, pkg, file_system=file_system)
diff --git a/lib/ansible/modules/network/nxos/nxos_snapshot.py b/lib/ansible/modules/network/nxos/nxos_snapshot.py
index 07b50e6475..e8f07af70b 100644
--- a/lib/ansible/modules/network/nxos/nxos_snapshot.py
+++ b/lib/ansible/modules/network/nxos/nxos_snapshot.py
@@ -333,6 +333,7 @@ def write_on_file(content, filename, module):
return filepath
+
def main():
argument_spec = dict(
action=dict(required=True, choices=['create', 'add', 'compare', 'delete', 'delete_all']),
@@ -421,4 +422,4 @@ def main():
module.exit_json(**result)
if __name__ == '__main__':
- main()
+ main()
diff --git a/lib/ansible/modules/network/nxos/nxos_static_route.py b/lib/ansible/modules/network/nxos/nxos_static_route.py
index 1e44755d71..3989a88b74 100644
--- a/lib/ansible/modules/network/nxos/nxos_static_route.py
+++ b/lib/ansible/modules/network/nxos/nxos_static_route.py
@@ -182,7 +182,7 @@ def set_route_command(module, prefix):
def get_dotted_mask(mask):
bits = 0
- for i in range(32-mask, 32):
+ for i in range(32 - mask, 32):
bits |= (1 << i)
mask = ("%d.%d.%d.%d" % ((bits & 0xff000000) >> 24, (bits & 0xff0000) >> 16, (bits & 0xff00) >> 8, (bits & 0xff)))
return mask
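
Editor's note (illustrative, not part of the patch): the only change to get_dotted_mask() above is the spacing in `32 - mask`, but the function itself is a compact prefix-length to dotted-netmask converter. A runnable copy, lightly condensed, with a worked /24 check (the input value is just an example):

# Lightly condensed copy of get_dotted_mask() from the hunk above; /24 is an example input.
def get_dotted_mask(mask):
    bits = 0
    for i in range(32 - mask, 32):
        bits |= (1 << i)
    return "%d.%d.%d.%d" % ((bits & 0xff000000) >> 24, (bits & 0xff0000) >> 16,
                            (bits & 0xff00) >> 8, (bits & 0xff))

print(get_dotted_mask(24))   # -> 255.255.255.0
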
diff --git a/lib/ansible/modules/network/nxos/nxos_system.py b/lib/ansible/modules/network/nxos/nxos_system.py
index bf1909c61b..41dbfb4a2e 100644
--- a/lib/ansible/modules/network/nxos/nxos_system.py
+++ b/lib/ansible/modules/network/nxos/nxos_system.py
@@ -120,6 +120,7 @@ from ansible.module_utils.network.common.utils import ComplexList
_CONFIGURED_VRFS = None
+
def has_vrf(module, vrf):
global _CONFIGURED_VRFS
if _CONFIGURED_VRFS is not None:
@@ -128,12 +129,14 @@ def has_vrf(module, vrf):
_CONFIGURED_VRFS = re.findall(r'vrf context (\S+)', config)
return vrf in _CONFIGURED_VRFS
+
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
- needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
- difference = lambda x,y,z: [item for item in x[z] if item not in y[z]]
+ def needs_update(x): return want.get(x) and (want.get(x) != have.get(x))
+
+ def difference(x, y, z): return [item for item in x[z] if item not in y[z]]
def remove(cmd, commands, vrf=None):
if vrf:
@@ -195,7 +198,7 @@ def map_obj_to_commands(want, have, module):
if want['name_servers']:
for item in difference(have, want, 'name_servers'):
- cmd = 'no ip name-server %s' % item['server']
+ cmd = 'no ip name-server %s' % item['server']
remove(cmd, commands, item['vrf'])
for item in difference(want, have, 'name_servers'):
cmd = 'ip name-server %s' % item['server']
@@ -206,11 +209,13 @@ def map_obj_to_commands(want, have, module):
return commands
+
def parse_hostname(config):
match = re.search(r'^hostname (\S+)', config, re.M)
if match:
return match.group(1)
+
def parse_domain_name(config, vrf_config):
objects = list()
regex = re.compile(r'ip domain-name (\S+)')
@@ -226,6 +231,7 @@ def parse_domain_name(config, vrf_config):
return objects
+
def parse_domain_search(config, vrf_config):
objects = list()
@@ -238,6 +244,7 @@ def parse_domain_search(config, vrf_config):
return objects
+
def parse_name_servers(config, vrf_config, vrfs):
objects = list()
@@ -256,11 +263,13 @@ def parse_name_servers(config, vrf_config, vrfs):
return objects
+
def parse_system_mtu(config):
match = re.search(r'^system jumbomtu (\d+)', config, re.M)
if match:
return int(match.group(1))
+
def map_config_to_obj(module):
config = get_config(module)
configobj = NetworkConfig(indent=2, contents=config)
@@ -281,10 +290,12 @@ def map_config_to_obj(module):
'system_mtu': parse_system_mtu(config)
}
+
def validate_system_mtu(value, module):
if not 1500 <= value <= 9216:
module.fail_json(msg='system_mtu must be between 1500 and 9216')
+
def map_params_to_obj(module):
obj = {
'hostname': module.params['hostname'],
@@ -316,6 +327,7 @@ def map_params_to_obj(module):
return obj
+
def main():
""" main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/nxos/nxos_udld.py b/lib/ansible/modules/network/nxos/nxos_udld.py
index c0f364b86a..2291d92f0d 100644
--- a/lib/ansible/modules/network/nxos/nxos_udld.py
+++ b/lib/ansible/modules/network/nxos/nxos_udld.py
@@ -157,7 +157,6 @@ def apply_key_map(key_map, table):
return new_dict
-
def get_commands_config_udld_global(delta, reset):
config_args = {
'enabled': 'udld aggressive',
@@ -224,13 +223,12 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- required_one_of=[['aggressive', 'msg_time', 'reset']],
- supports_check_mode=True)
+ required_one_of=[['aggressive', 'msg_time', 'reset']],
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
-
aggressive = module.params['aggressive']
msg_time = module.params['msg_time']
reset = module.params['reset']
diff --git a/lib/ansible/modules/network/nxos/nxos_udld_interface.py b/lib/ansible/modules/network/nxos/nxos_udld_interface.py
index 08e4b4b3f9..3e641d7fe0 100644
--- a/lib/ansible/modules/network/nxos/nxos_udld_interface.py
+++ b/lib/ansible/modules/network/nxos/nxos_udld_interface.py
@@ -171,7 +171,7 @@ def get_commands_config_udld_interface1(delta, interface, module, existing):
if mode == 'aggressive':
command = 'udld aggressive'
if mode == 'enabled':
- command = 'no udld aggressive ; udld enable'
+ command = 'no udld aggressive ; udld enable'
elif mode == 'disabled':
command = 'no udld aggressive ; no udld enable'
if command:
@@ -188,7 +188,7 @@ def get_commands_config_udld_interface2(delta, interface, module, existing):
if mode == 'aggressive':
command = 'udld aggressive'
if mode == 'enabled':
- command = 'no udld aggressive ; no udld disable'
+ command = 'no udld aggressive ; no udld disable'
elif mode == 'disabled':
command = 'no udld aggressive ; udld disable'
if command:
@@ -237,7 +237,7 @@ def get_commands_remove_udld_interface2(delta, interface, module, existing):
def main():
argument_spec = dict(
mode=dict(choices=['enabled', 'disabled', 'aggressive'],
- required=True),
+ required=True),
interface=dict(type='str', required=True),
state=dict(choices=['absent', 'present'], default='present'),
)
@@ -245,7 +245,7 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
@@ -265,14 +265,14 @@ def main():
if state == 'present':
if delta:
command = get_commands_config_udld_interface1(delta, interface,
- module, existing)
+ module, existing)
commands.append(command)
elif state == 'absent':
common = set(proposed.items()).intersection(existing.items())
if common:
command = get_commands_remove_udld_interface1(
dict(common), interface, module, existing
- )
+ )
commands.append(command)
cmds = flatten_list(commands)
@@ -297,11 +297,11 @@ def main():
commands = []
if state == 'present':
command = get_commands_config_udld_interface2(delta, interface,
- module, existing)
+ module, existing)
elif state == 'absent':
command = get_commands_remove_udld_interface2(
dict(common), interface, module, existing
- )
+ )
commands.append(command)
cmds = flatten_list(commands)
diff --git a/lib/ansible/modules/network/nxos/nxos_user.py b/lib/ansible/modules/network/nxos/nxos_user.py
index c75c5439b8..599409f8ab 100644
--- a/lib/ansible/modules/network/nxos/nxos_user.py
+++ b/lib/ansible/modules/network/nxos/nxos_user.py
@@ -166,6 +166,7 @@ def validate_roles(value, module):
if item not in VALID_ROLES:
module.fail_json(msg='invalid role specified')
+
def map_obj_to_commands(updates, module):
commands = list()
state = module.params['state']
@@ -174,9 +175,11 @@ def map_obj_to_commands(updates, module):
for update in updates:
want, have = update
- needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
- add = lambda x: commands.append('username %s %s' % (want['name'], x))
- remove = lambda x: commands.append('no username %s %s' % (want['name'], x))
+ def needs_update(x): return want.get(x) and (want.get(x) != have.get(x))
+
+ def add(x): return commands.append('username %s %s' % (want['name'], x))
+
+ def remove(x): return commands.append('no username %s %s' % (want['name'], x))
if want['state'] == 'absent':
commands.append('no username %s' % want['name'])
@@ -192,7 +195,6 @@ def map_obj_to_commands(updates, module):
if needs_update('sshkey'):
add('sshkey %s' % want['sshkey'])
-
if want['roles']:
if have:
for item in set(have['roles']).difference(want['roles']):
@@ -204,13 +206,14 @@ def map_obj_to_commands(updates, module):
for item in want['roles']:
add('role %s' % item)
-
return commands
+
def parse_password(data):
if not data.get('remote_login'):
return '<PASSWORD>'
+
def parse_roles(data):
configured_roles = data.get('TABLE_role')['ROW_role']
roles = list()
@@ -219,6 +222,7 @@ def parse_roles(data):
roles.append(item['role'])
return roles
+
def map_config_to_obj(module):
out = run_commands(module, ['show user-account | json'])
data = out[0]
@@ -235,6 +239,7 @@ def map_config_to_obj(module):
})
return objects
+
def get_param_value(key, item, module):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
@@ -249,6 +254,7 @@ def get_param_value(key, item, module):
return value
+
def map_params_to_obj(module):
aggregate = module.params['aggregate']
if not aggregate:
@@ -290,6 +296,7 @@ def map_params_to_obj(module):
return objects
+
def update_objects(want, have):
updates = list()
for entry in want:
@@ -302,6 +309,7 @@ def update_objects(want, have):
updates.append((entry, item))
return updates
+
def main():
""" main entry point for module execution
"""
@@ -328,7 +336,6 @@ def main():
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
-
result = {'changed': False}
warnings = list()
diff --git a/lib/ansible/modules/network/nxos/nxos_vrf.py b/lib/ansible/modules/network/nxos/nxos_vrf.py
index a4c7e747eb..358278554f 100644
--- a/lib/ansible/modules/network/nxos/nxos_vrf.py
+++ b/lib/ansible/modules/network/nxos/nxos_vrf.py
@@ -162,7 +162,7 @@ def get_vrf_description(vrf, module):
for element in splitted_body:
if 'description' in element:
match_description = re.match(descr_regex, element,
- re.DOTALL)
+ re.DOTALL)
group_description = match_description.groupdict()
description = group_description["descr"]
@@ -182,7 +182,7 @@ def get_vrf(vrf, module):
vrf_key = {
'vrf_name': 'vrf',
'vrf_state': 'admin_state'
- }
+ }
try:
body = execute_show_command(command, module)[0]
diff --git a/lib/ansible/modules/network/nxos/nxos_vtp_domain.py b/lib/ansible/modules/network/nxos/nxos_vtp_domain.py
index 7a9fbde556..53c4080207 100644
--- a/lib/ansible/modules/network/nxos/nxos_vtp_domain.py
+++ b/lib/ansible/modules/network/nxos/nxos_vtp_domain.py
@@ -167,7 +167,7 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
diff --git a/lib/ansible/modules/network/nxos/nxos_vtp_password.py b/lib/ansible/modules/network/nxos/nxos_vtp_password.py
index e484d95ca5..14c04411c9 100644
--- a/lib/ansible/modules/network/nxos/nxos_vtp_password.py
+++ b/lib/ansible/modules/network/nxos/nxos_vtp_password.py
@@ -194,13 +194,13 @@ def main():
argument_spec = dict(
vtp_password=dict(type='str', no_log=True),
state=dict(choices=['absent', 'present'],
- default='present'),
+ default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
diff --git a/lib/ansible/modules/network/nxos/nxos_vtp_version.py b/lib/ansible/modules/network/nxos/nxos_vtp_version.py
index f25866174f..469b2bb69b 100644
--- a/lib/ansible/modules/network/nxos/nxos_vtp_version.py
+++ b/lib/ansible/modules/network/nxos/nxos_vtp_version.py
@@ -165,7 +165,7 @@ def main():
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
+ supports_check_mode=True)
warnings = list()
check_args(module, warnings)
diff --git a/lib/ansible/modules/network/ordnance/ordnance_config.py b/lib/ansible/modules/network/ordnance/ordnance_config.py
index a123b0d998..6f969604fc 100644
--- a/lib/ansible/modules/network/ordnance/ordnance_config.py
+++ b/lib/ansible/modules/network/ordnance/ordnance_config.py
@@ -206,6 +206,7 @@ def check_args(module, warnings):
'match=none instead. This argument will be '
'removed in the future')
+
def extract_banners(config):
banners = {}
banner_cmds = re.findall(r'^banner (\w+)', config, re.M)
@@ -225,6 +226,7 @@ def extract_banners(config):
config = re.sub(r'banner \w+ \^C\^C', '!! banner removed', config)
return (config, banners)
+
def diff_banners(want, have):
candidate = {}
for key, value in iteritems(want):
@@ -232,6 +234,7 @@ def diff_banners(want, have):
candidate[key] = value
return candidate
+
def load_banners(module, banners):
delimiter = module.params['multiline_delimiter']
for key, value in iteritems(banners):
@@ -242,6 +245,7 @@ def load_banners(module, banners):
time.sleep(1)
module.connection.shell.receive()
+
def get_config(module, result):
contents = module.params['config']
if not contents:
@@ -251,6 +255,7 @@ def get_config(module, result):
contents, banners = extract_banners(contents)
return NetworkConfig(indent=1, contents=contents), banners
+
def get_candidate(module):
candidate = NetworkConfig(indent=1)
banners = {}
@@ -265,6 +270,7 @@ def get_candidate(module):
return candidate, banners
+
def run(module, result):
match = module.params['match']
replace = module.params['replace']
@@ -275,7 +281,7 @@ def run(module, result):
if match != 'none':
config, have_banners = get_config(module, result)
path = module.params['parents']
- configobjs = candidate.difference(config, path=path,match=match,
+ configobjs = candidate.difference(config, path=path, match=match,
replace=replace)
else:
configobjs = candidate.items
@@ -311,6 +317,7 @@ def run(module, result):
module.config.save_config()
result['changed'] = True
+
def main():
""" main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/ordnance/ordnance_facts.py b/lib/ansible/modules/network/ordnance/ordnance_facts.py
index eccfc5d7a7..ae4742f192 100644
--- a/lib/ansible/modules/network/ordnance/ordnance_facts.py
+++ b/lib/ansible/modules/network/ordnance/ordnance_facts.py
@@ -223,6 +223,7 @@ FACT_SUBSETS = dict(
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
+
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list')
diff --git a/lib/ansible/modules/network/ovs/openvswitch_bridge.py b/lib/ansible/modules/network/ovs/openvswitch_bridge.py
index b8009b6b4a..2f437d1ec7 100644
--- a/lib/ansible/modules/network/ovs/openvswitch_bridge.py
+++ b/lib/ansible/modules/network/ovs/openvswitch_bridge.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-#coding: utf-8 -*-
+# coding: utf-8 -*-
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
# Portions copyright @ 2015 VMware, Inc.
@@ -109,6 +109,7 @@ def _fail_mode_to_str(text):
else:
return text.strip()
+
def _external_ids_to_dict(text):
if not text:
return None
@@ -122,6 +123,7 @@ def _external_ids_to_dict(text):
return d
+
def map_obj_to_commands(want, have, module):
commands = list()
@@ -156,7 +158,7 @@ def map_obj_to_commands(want, have, module):
command = templatized_command % module.params
if want['parent']:
- templatized_command = "%(parent)s %(vlan)s"
+ templatized_command = "%(parent)s %(vlan)s"
command += " " + templatized_command % module.params
if want['set']:
@@ -175,7 +177,7 @@ def map_obj_to_commands(want, have, module):
if want['external_ids']:
for k, v in iteritems(want['external_ids']):
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
- " br-set-external-id %(bridge)s")
+ " br-set-external-id %(bridge)s")
command = templatized_command % module.params
command += " " + k + " " + v
commands.append(command)
diff --git a/lib/ansible/modules/network/ovs/openvswitch_port.py b/lib/ansible/modules/network/ovs/openvswitch_port.py
index f24d9658fe..ed0673a223 100644
--- a/lib/ansible/modules/network/ovs/openvswitch_port.py
+++ b/lib/ansible/modules/network/ovs/openvswitch_port.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-#coding: utf-8 -*-
+# coding: utf-8 -*-
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
# Portions copyright @ 2015 VMware, Inc.
@@ -118,6 +118,7 @@ def _external_ids_to_dict(text):
return d
+
def _tag_to_str(text):
text = text.strip()
@@ -126,6 +127,7 @@ def _tag_to_str(text):
else:
return text
+
def map_obj_to_commands(want, have, module):
commands = list()
@@ -167,7 +169,7 @@ def map_obj_to_commands(want, have, module):
command = templatized_command % module.params
if want['tag']:
- templatized_command = " tag=%(tag)s"
+ templatized_command = " tag=%(tag)s"
command += templatized_command % module.params
if want['set']:
@@ -181,7 +183,7 @@ def map_obj_to_commands(want, have, module):
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set port %(port)s external_ids:")
command = templatized_command % module.params
- command += k + "=" + v
+ command += k + "=" + v
commands.append(command)
return commands
@@ -226,9 +228,10 @@ def map_params_to_obj(module):
return obj
+
def main():
""" Entry point. """
- argument_spec={
+ argument_spec = {
'bridge': {'required': True},
'port': {'required': True},
'state': {'default': 'present', 'choices': ['present', 'absent']},
diff --git a/lib/ansible/modules/network/panos/panos_admin.py b/lib/ansible/modules/network/panos/panos_admin.py
index 6add314a02..436a39cd40 100644
--- a/lib/ansible/modules/network/panos/panos_admin.py
+++ b/lib/ansible/modules/network/panos/panos_admin.py
@@ -137,7 +137,7 @@ def admin_set(xapi, module, admin_username, admin_password, role):
element='<%s>%s</%s>' % (role, rbval, role))
if admin_password is not None:
- xapi.edit(xpath=_ADMIN_XPATH % admin_username+'/phash',
+ xapi.edit(xpath=_ADMIN_XPATH % admin_username + '/phash',
element='<phash>%s</phash>' % phash)
changed = True
diff --git a/lib/ansible/modules/network/panos/panos_admpwd.py b/lib/ansible/modules/network/panos/panos_admpwd.py
index e9f71fce6a..375df068b0 100644
--- a/lib/ansible/modules/network/panos/panos_admpwd.py
+++ b/lib/ansible/modules/network/panos/panos_admpwd.py
@@ -84,9 +84,9 @@ import sys
try:
import paramiko
- HAS_LIB=True
+ HAS_LIB = True
except ImportError:
- HAS_LIB=False
+ HAS_LIB = False
_PROMPTBUFF = 4096
@@ -101,7 +101,7 @@ def wait_with_timeout(module, shell, prompt, timeout=60):
if len(endresult) != 0 and endresult[-1] == prompt:
break
- if time.time()-now > timeout:
+ if time.time() - now > timeout:
module.fail_json(msg="Timeout waiting for prompt")
return result
@@ -143,14 +143,14 @@ def set_panwfw_password(module, ip_address, key_filename, newpassword, username)
stdout += buff
# enter password for the first time
- shell.send(newpassword+'\n')
+ shell.send(newpassword + '\n')
# wait for the password prompt
buff = wait_with_timeout(module, shell, ":")
stdout += buff
# enter password for the second time
- shell.send(newpassword+'\n')
+ shell.send(newpassword + '\n')
# wait for the config mode prompt
buff = wait_with_timeout(module, shell, "#")
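
Editor's note (illustrative, not part of the patch): panos_admpwd's wait_with_timeout() above relies on the `time.time() - now > timeout` pattern to bound how long it polls the SSH channel for a prompt. A generic, runnable sketch of that poll-with-deadline pattern; the predicate and values are invented and nothing here touches paramiko or a firewall:

# Generic poll-with-deadline sketch, stdlib only.
import time

def wait_for(predicate, timeout=60, interval=0.2):
    start = time.time()
    while not predicate():
        if time.time() - start > timeout:
            raise TimeoutError("Timeout waiting for condition")
        time.sleep(interval)

deadline = time.time() + 1          # made-up condition: becomes true after ~1 second
wait_for(lambda: time.time() >= deadline, timeout=5)
print("condition met")
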
diff --git a/lib/ansible/modules/network/panos/panos_cert_gen_ssh.py b/lib/ansible/modules/network/panos/panos_cert_gen_ssh.py
index 3a6c47830d..7c2516f9c1 100644
--- a/lib/ansible/modules/network/panos/panos_cert_gen_ssh.py
+++ b/lib/ansible/modules/network/panos/panos_cert_gen_ssh.py
@@ -81,7 +81,7 @@ EXAMPLES = '''
signed_by: "root-ca"
'''
-RETURN='''
+RETURN = '''
# Default return values
'''
@@ -96,9 +96,9 @@ import time
try:
import paramiko
- HAS_LIB=True
+ HAS_LIB = True
except ImportError:
- HAS_LIB=False
+ HAS_LIB = False
_PROMPTBUFF = 4096
@@ -113,14 +113,14 @@ def wait_with_timeout(module, shell, prompt, timeout=60):
if len(endresult) != 0 and endresult[-1] == prompt:
break
- if time.time()-now > timeout:
+ if time.time() - now > timeout:
module.fail_json(msg="Timeout waiting for prompt")
return result
def generate_cert(module, ip_address, key_filename, password,
- cert_cn, cert_friendly_name, signed_by, rsa_nbits ):
+ cert_cn, cert_friendly_name, signed_by, rsa_nbits):
stdout = ""
client = paramiko.SSHClient()
@@ -154,7 +154,7 @@ def generate_cert(module, ip_address, key_filename, password,
shell.send('exit\n')
if 'Success' not in buff:
- module.fail_json(msg="Error generating self signed certificate: "+stdout)
+ module.fail_json(msg="Error generating self signed certificate: " + stdout)
client.close()
return stdout
diff --git a/lib/ansible/modules/network/panos/panos_check.py b/lib/ansible/modules/network/panos/panos_check.py
index aca02cc6b1..bfbda14e1e 100644
--- a/lib/ansible/modules/network/panos/panos_check.py
+++ b/lib/ansible/modules/network/panos/panos_check.py
@@ -75,7 +75,7 @@ EXAMPLES = '''
delay: 30
'''
-RETURN='''
+RETURN = '''
# Default return values
'''
@@ -131,7 +131,7 @@ def main():
timeout=60
)
- checkpnt = time.time()+timeout
+ checkpnt = time.time() + timeout
while True:
try:
xapi.op(cmd="show jobs all", cmd_xml=True)
diff --git a/lib/ansible/modules/network/panos/panos_dag.py b/lib/ansible/modules/network/panos/panos_dag.py
index db77d6bd83..7748c7ccc2 100644
--- a/lib/ansible/modules/network/panos/panos_dag.py
+++ b/lib/ansible/modules/network/panos/panos_dag.py
@@ -71,7 +71,7 @@ EXAMPLES = '''
dag_filter: "'aws-tag.aws:cloudformation:logical-id.ServerInstance' and 'instanceState.running'"
'''
-RETURN='''
+RETURN = '''
# Default return values
'''
diff --git a/lib/ansible/modules/network/panos/panos_import.py b/lib/ansible/modules/network/panos/panos_import.py
index 7e0f82151d..db4e523fd8 100644
--- a/lib/ansible/modules/network/panos/panos_import.py
+++ b/lib/ansible/modules/network/panos/panos_import.py
@@ -73,7 +73,7 @@ EXAMPLES = '''
category: software
'''
-RETURN='''
+RETURN = '''
# Default return values
'''
@@ -118,7 +118,7 @@ def import_file(xapi, module, ip_address, file_, category):
)
r = requests.post(
- 'https://'+ip_address+'/api/',
+ 'https://' + ip_address + '/api/',
verify=False,
params=params,
headers={'Content-Type': mef.content_type},
diff --git a/lib/ansible/modules/network/panos/panos_interface.py b/lib/ansible/modules/network/panos/panos_interface.py
index 66be2315df..4aea9f1390 100644
--- a/lib/ansible/modules/network/panos/panos_interface.py
+++ b/lib/ansible/modules/network/panos/panos_interface.py
@@ -78,7 +78,7 @@ EXAMPLES = '''
create_default_route: "yes"
'''
-RETURN='''
+RETURN = '''
# Default return values
'''
@@ -98,8 +98,8 @@ _IF_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
_ZONE_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/vsys/entry/zone/entry"
-_ZONE_XPATH_QUERY = _ZONE_XPATH+"[network/layer3/member/text()='%s']"
-_ZONE_XPATH_IF = _ZONE_XPATH+"[@name='%s']/network/layer3/member[text()='%s']"
+_ZONE_XPATH_QUERY = _ZONE_XPATH + "[network/layer3/member/text()='%s']"
+_ZONE_XPATH_IF = _ZONE_XPATH + "[@name='%s']/network/layer3/member[text()='%s']"
_VR_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/network/virtual-router/entry"
@@ -120,9 +120,9 @@ def add_dhcp_if(xapi, if_name, zone_name, create_default_route):
if_xml = (''.join(if_xml)) % (if_name, cdr)
xapi.edit(xpath=_IF_XPATH % if_name, element=if_xml)
- xapi.set(xpath=_ZONE_XPATH+"[@name='%s']/network/layer3" % zone_name,
+ xapi.set(xpath=_ZONE_XPATH + "[@name='%s']/network/layer3" % zone_name,
element='<member>%s</member>' % if_name)
- xapi.set(xpath=_VR_XPATH+"[@name='default']/interface",
+ xapi.set(xpath=_VR_XPATH + "[@name='default']/interface",
element='<member>%s</member>' % if_name)
return True
diff --git a/lib/ansible/modules/network/panos/panos_loadcfg.py b/lib/ansible/modules/network/panos/panos_loadcfg.py
index 7f2146e581..bd30e3accb 100644
--- a/lib/ansible/modules/network/panos/panos_loadcfg.py
+++ b/lib/ansible/modules/network/panos/panos_loadcfg.py
@@ -71,7 +71,7 @@ EXAMPLES = '''
file: "{{result.filename}}"
'''
-RETURN='''
+RETURN = '''
# Default return values
'''
diff --git a/lib/ansible/modules/network/panos/panos_mgtconfig.py b/lib/ansible/modules/network/panos/panos_mgtconfig.py
index db103249bb..fde5b640d2 100644
--- a/lib/ansible/modules/network/panos/panos_mgtconfig.py
+++ b/lib/ansible/modules/network/panos/panos_mgtconfig.py
@@ -81,7 +81,7 @@ EXAMPLES = '''
panorama_secondary: "1.1.1.4"
'''
-RETURN='''
+RETURN = '''
# Default return values
'''
@@ -112,11 +112,11 @@ def set_dns_server(xapi, new_dns_server, primary=True):
tag = "primary"
else:
tag = "secondary"
- xpath = _XPATH_DNS_SERVERS+"/"+tag
+ xpath = _XPATH_DNS_SERVERS + "/" + tag
# check the current element value
xapi.get(xpath)
- val = xapi.element_root.find(".//"+tag)
+ val = xapi.element_root.find(".//" + tag)
if val is not None:
# element exists
val = val.text
@@ -135,11 +135,11 @@ def set_panorama_server(xapi, new_panorama_server, primary=True):
tag = "panorama-server"
else:
tag = "panorama-server-2"
- xpath = _XPATH_PANORAMA_SERVERS+"/"+tag
+ xpath = _XPATH_PANORAMA_SERVERS + "/" + tag
# check the current element value
xapi.get(xpath)
- val = xapi.element_root.find(".//"+tag)
+ val = xapi.element_root.find(".//" + tag)
if val is not None:
# element exists
val = val.text
diff --git a/lib/ansible/modules/network/panos/panos_pg.py b/lib/ansible/modules/network/panos/panos_pg.py
index 8cc39841cb..555d1f38c0 100644
--- a/lib/ansible/modules/network/panos/panos_pg.py
+++ b/lib/ansible/modules/network/panos/panos_pg.py
@@ -101,7 +101,7 @@ EXAMPLES = '''
vulnerability: "default"
'''
-RETURN='''
+RETURN = '''
# Default return values
'''
diff --git a/lib/ansible/modules/network/sros/sros_command.py b/lib/ansible/modules/network/sros/sros_command.py
index b647955af6..1a24fd29a8 100644
--- a/lib/ansible/modules/network/sros/sros_command.py
+++ b/lib/ansible/modules/network/sros/sros_command.py
@@ -148,6 +148,7 @@ def to_lines(stdout):
item = str(item).split('\n')
yield item
+
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
@@ -168,6 +169,7 @@ def parse_commands(module, warnings):
)
return commands
+
def main():
"""main entry point for module execution
"""
@@ -221,7 +223,6 @@ def main():
msg = 'One or more conditional statements have not be satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
-
result = {
'changed': False,
'stdout': responses,
diff --git a/lib/ansible/modules/network/sros/sros_config.py b/lib/ansible/modules/network/sros/sros_config.py
index 6a59e25398..7b65ba886d 100644
--- a/lib/ansible/modules/network/sros/sros_config.py
+++ b/lib/ansible/modules/network/sros/sros_config.py
@@ -275,6 +275,7 @@ def run(module, result):
load_config(module, commands)
result['changed'] = True
+
def main():
""" main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/sros/sros_rollback.py b/lib/ansible/modules/network/sros/sros_rollback.py
index e85192d086..41f9e3b095 100644
--- a/lib/ansible/modules/network/sros/sros_rollback.py
+++ b/lib/ansible/modules/network/sros/sros_rollback.py
@@ -108,6 +108,7 @@ def invoke(name, *args, **kwargs):
if func:
return func(*args, **kwargs)
+
def sanitize_config(lines):
commands = list()
for line in lines:
@@ -118,6 +119,7 @@ def sanitize_config(lines):
commands.append(line)
return commands
+
def present(module, commands):
setters = set()
for key, value in module.argument_spec.items():
@@ -127,6 +129,7 @@ def present(module, commands):
setters.add(setter)
invoke(setter, module, commands)
+
def absent(module, commands):
config = get_config(module)
if 'rollback-location' in config:
@@ -138,30 +141,36 @@ def absent(module, commands):
if 'local-max-checkpoints' in config:
commands.append('configure system rollback no remote-max-checkpoints')
+
def set_rollback_location(module, commands):
value = module.params['rollback_location']
commands.append('configure system rollback rollback-location "%s"' % value)
+
def set_local_max_checkpoints(module, commands):
value = module.params['local_max_checkpoints']
if not 1 <= value <= 50:
module.fail_json(msg='local_max_checkpoints must be between 1 and 50')
commands.append('configure system rollback local-max-checkpoints %s' % value)
+
def set_remote_max_checkpoints(module, commands):
value = module.params['remote_max_checkpoints']
if not 1 <= value <= 50:
module.fail_json(msg='remote_max_checkpoints must be between 1 and 50')
commands.append('configure system rollback remote-max-checkpoints %s' % value)
+
def set_rescue_location(module, commands):
value = module.params['rescue_location']
commands.append('configure system rollback rescue-location "%s"' % value)
+
def get_device_config(module):
contents = get_config(module)
return NetworkConfig(indent=4, contents=contents)
+
def main():
""" main entry point for module execution
"""
diff --git a/lib/ansible/modules/network/vyos/vyos_command.py b/lib/ansible/modules/network/vyos/vyos_command.py
index f46532dc49..798ec0077a 100644
--- a/lib/ansible/modules/network/vyos/vyos_command.py
+++ b/lib/ansible/modules/network/vyos/vyos_command.py
@@ -142,6 +142,7 @@ from ansible.module_utils.six import string_types
from ansible.module_utils.network.vyos.vyos import run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
+
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
diff --git a/lib/ansible/modules/network/vyos/vyos_system.py b/lib/ansible/modules/network/vyos/vyos_system.py
index 40b520de5a..12f607afd8 100644
--- a/lib/ansible/modules/network/vyos/vyos_system.py
+++ b/lib/ansible/modules/network/vyos/vyos_system.py
@@ -162,6 +162,7 @@ def spec_to_commands(want, have):
return commands
+
def map_param_to_obj(module):
return {
'host_name': module.params['host_name'],
diff --git a/lib/ansible/modules/notification/campfire.py b/lib/ansible/modules/notification/campfire.py
index f3b93be60f..cf64381ddc 100644
--- a/lib/ansible/modules/notification/campfire.py
+++ b/lib/ansible/modules/notification/campfire.py
@@ -122,21 +122,21 @@ def main():
target_url = '%s/room/%s/speak.xml' % (URI, room)
headers = {'Content-Type': 'application/xml',
- 'User-agent': AGENT}
+ 'User-agent': AGENT}
# Send some audible notification if requested
if notify:
response, info = fetch_url(module, target_url, data=NSTR % cgi.escape(notify), headers=headers)
if info['status'] not in [200, 201]:
module.fail_json(msg="unable to send msg: '%s', campfire api"
- " returned error code: '%s'" %
+ " returned error code: '%s'" %
(notify, info['status']))
# Send the message
- response, info = fetch_url(module, target_url, data=MSTR %cgi.escape(msg), headers=headers)
+ response, info = fetch_url(module, target_url, data=MSTR % cgi.escape(msg), headers=headers)
if info['status'] not in [200, 201]:
module.fail_json(msg="unable to send msg: '%s', campfire api"
- " returned error code: '%s'" %
+ " returned error code: '%s'" %
(msg, info['status']))
module.exit_json(changed=True, room=room, msg=msg, notify=notify)
diff --git a/lib/ansible/modules/notification/flowdock.py b/lib/ansible/modules/notification/flowdock.py
index c17c897041..7e41e663b1 100644
--- a/lib/ansible/modules/notification/flowdock.py
+++ b/lib/ansible/modules/notification/flowdock.py
@@ -115,7 +115,7 @@ def main():
argument_spec=dict(
token=dict(required=True, no_log=True),
msg=dict(required=True),
- type=dict(required=True, choices=["inbox","chat"]),
+ type=dict(required=True, choices=["inbox", "chat"]),
external_user_name=dict(required=False),
from_address=dict(required=False),
source=dict(required=False),
@@ -125,7 +125,7 @@ def main():
project=dict(required=False),
tags=dict(required=False),
link=dict(required=False),
- validate_certs = dict(default='yes', type='bool'),
+ validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
@@ -152,7 +152,7 @@ def main():
module.fail_json(msg="external_user_name is required for the 'chat' type")
# required params for the 'inbox' type
- for item in [ 'from_address', 'source', 'subject' ]:
+ for item in ['from_address', 'source', 'subject']:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
@@ -166,7 +166,7 @@ def main():
params['tags'] = module.params["tags"]
# optional params for the 'inbox' type
- for item in [ 'from_name', 'reply_to', 'project', 'link' ]:
+ for item in ['from_name', 'reply_to', 'project', 'link']:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
diff --git a/lib/ansible/modules/notification/grove.py b/lib/ansible/modules/notification/grove.py
index 6734039260..d28e780358 100644
--- a/lib/ansible/modules/notification/grove.py
+++ b/lib/ansible/modules/notification/grove.py
@@ -71,6 +71,7 @@ BASE_URL = 'https://grove.io/api/notice/%s/'
# ==============================================================
# do_notify_grove
+
def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
my_url = BASE_URL % (channel_token,)
@@ -88,15 +89,16 @@ def do_notify_grove(module, channel_token, service, message, url=None, icon_url=
# ==============================================================
# main
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- channel_token = dict(type='str', required=True, no_log=True),
- message = dict(type='str', required=True),
- service = dict(type='str', default='ansible'),
- url = dict(type='str', default=None),
- icon_url = dict(type='str', default=None),
- validate_certs = dict(default='yes', type='bool'),
+ argument_spec=dict(
+ channel_token=dict(type='str', required=True, no_log=True),
+ message=dict(type='str', required=True),
+ service=dict(type='str', default='ansible'),
+ url=dict(type='str', default=None),
+ icon_url=dict(type='str', default=None),
+ validate_certs=dict(default='yes', type='bool'),
)
)
diff --git a/lib/ansible/modules/notification/hall.py b/lib/ansible/modules/notification/hall.py
index 69d39d343e..c1f11222b0 100644
--- a/lib/ansible/modules/notification/hall.py
+++ b/lib/ansible/modules/notification/hall.py
@@ -62,12 +62,12 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
-HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
+HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
def send_request_to_hall(module, room_token, payload):
headers = {'Content-Type': 'application/json'}
- payload=module.jsonify(payload)
+ payload = module.jsonify(payload)
api_endpoint = HALL_API_ENDPOINT % (room_token)
response, info = fetch_url(module, api_endpoint, data=payload, headers=headers)
if info['status'] != 200:
diff --git a/lib/ansible/modules/notification/irc.py b/lib/ansible/modules/notification/irc.py
index 1bee674bfd..51809d82a0 100644
--- a/lib/ansible/modules/notification/irc.py
+++ b/lib/ansible/modules/notification/irc.py
@@ -259,11 +259,11 @@ def main():
nick_to=dict(required=False, type='list'),
msg=dict(required=True),
color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
- "green", "red", "brown",
- "purple", "orange", "yellow",
- "light_green", "teal", "light_cyan",
- "light_blue", "pink", "gray",
- "light_gray", "none"]),
+ "green", "red", "brown",
+ "purple", "orange", "yellow",
+ "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray",
+ "light_gray", "none"]),
style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
channel=dict(required=False),
key=dict(no_log=True),
diff --git a/lib/ansible/modules/notification/jabber.py b/lib/ansible/modules/notification/jabber.py
index b97815f7fe..f992817674 100644
--- a/lib/ansible/modules/notification/jabber.py
+++ b/lib/ansible/modules/notification/jabber.py
@@ -105,7 +105,7 @@ def main():
to=dict(required=True),
msg=dict(required=True),
host=dict(required=False),
- port=dict(required=False,default=5222),
+ port=dict(required=False, default=5222),
encoding=dict(required=False),
),
supports_check_mode=True
@@ -134,15 +134,15 @@ def main():
msg = xmpp.protocol.Message(body=module.params['msg'])
try:
- conn=xmpp.Client(server, debug=[])
- if not conn.connect(server=(host,port)):
+ conn = xmpp.Client(server, debug=[])
+ if not conn.connect(server=(host, port)):
module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
- if not conn.auth(user,password,'Ansible'):
- module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user,server))
+ if not conn.auth(user, password, 'Ansible'):
+ module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
# some old servers require this, also the sleep following send
conn.sendInitPresence(requestRoster=0)
- if nick: # sending to room instead of user, need to join
+ if nick: # sending to room instead of user, need to join
msg.setType('groupchat')
msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
conn.send(xmpp.Presence(to=module.params['to']))
diff --git a/lib/ansible/modules/notification/mattermost.py b/lib/ansible/modules/notification/mattermost.py
index 1ecf4d6d5a..6e9b03ba9a 100644
--- a/lib/ansible/modules/notification/mattermost.py
+++ b/lib/ansible/modules/notification/mattermost.py
@@ -98,54 +98,54 @@ from ansible.module_utils.urls import fetch_url
def main():
module = AnsibleModule(
supports_check_mode=True,
- argument_spec = dict(
- url = dict(type='str', required=True),
- api_key = dict(type='str', required=True, no_log=True),
- text = dict(type='str', required=True),
- channel = dict(type='str', default=None),
- username = dict(type='str', default='Ansible'),
- icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'),
- validate_certs = dict(default='yes', type='bool'),
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ api_key=dict(type='str', required=True, no_log=True),
+ text=dict(type='str', required=True),
+ channel=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ validate_certs=dict(default='yes', type='bool'),
)
)
- #init return dict
+ # init return dict
result = dict(changed=False, msg="OK")
- #define webhook
- webhook_url = "{0}/hooks/{1}".format(module.params['url'],module.params['api_key'])
+ # define webhook
+ webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
result['webhook_url'] = webhook_url
- #define payload
- payload = { }
+ # define payload
+ payload = {}
for param in ['text', 'channel', 'username', 'icon_url']:
if module.params[param] is not None:
payload[param] = module.params[param]
- payload=module.jsonify(payload)
+ payload = module.jsonify(payload)
result['payload'] = payload
- #http headers
+ # http headers
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
- #notes:
- #Nothing is done in check mode
- #it'll pass even if your server is down or/and if your token is invalid.
- #If someone find good way to check...
+ # notes:
+ # Nothing is done in check mode
+ # it'll pass even if your server is down or/and if your token is invalid.
+ # If someone find good way to check...
- #send request if not in test mode
+ # send request if not in test mode
if module.check_mode is False:
response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
- #something's wrong
+ # something's wrong
if info['status'] != 200:
- #some problem
+ # some problem
result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
module.fail_json(**result)
- #Looks good
+ # Looks good
module.exit_json(**result)
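
Editor's note (illustrative, not part of the patch): the mattermost hunk above mostly normalizes `#comment` to `# comment` and tightens spacing, but the payload loop it touches is a simple filter-then-serialize step. A standalone sketch with invented parameter values and plain json.dumps standing in for module.jsonify():

# Illustrative only: params values are invented; json.dumps stands in for module.jsonify().
import json

params = {'text': 'deploy finished', 'channel': None,
          'username': 'Ansible', 'icon_url': None}

payload = {}
for param in ['text', 'channel', 'username', 'icon_url']:
    if params[param] is not None:
        payload[param] = params[param]

print(json.dumps(payload))   # -> {"text": "deploy finished", "username": "Ansible"}
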
diff --git a/lib/ansible/modules/notification/mqtt.py b/lib/ansible/modules/notification/mqtt.py
index 09fbe09ecf..487ecf4f74 100644
--- a/lib/ansible/modules/notification/mqtt.py
+++ b/lib/ansible/modules/notification/mqtt.py
@@ -144,18 +144,18 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- server = dict(default = 'localhost'),
- port = dict(default = 1883, type='int'),
- topic = dict(required = True),
- payload = dict(required = True),
- client_id = dict(default = None),
- qos = dict(default="0", choices=["0", "1", "2"]),
- retain = dict(default=False, type='bool'),
- username = dict(default = None),
- password = dict(default = None, no_log=True),
- ca_certs = dict(default = None, type='path'),
- certfile = dict(default = None, type='path'),
- keyfile = dict(default = None, type='path'),
+ server=dict(default='localhost'),
+ port=dict(default=1883, type='int'),
+ topic=dict(required=True),
+ payload=dict(required=True),
+ client_id=dict(default=None),
+ qos=dict(default="0", choices=["0", "1", "2"]),
+ retain=dict(default=False, type='bool'),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ca_certs=dict(default=None, type='path'),
+ certfile=dict(default=None, type='path'),
+ keyfile=dict(default=None, type='path'),
),
supports_check_mode=True
)
@@ -163,18 +163,18 @@ def main():
if not HAS_PAHOMQTT:
module.fail_json(msg="Paho MQTT is not installed")
- server = module.params.get("server", 'localhost')
- port = module.params.get("port", 1883)
- topic = module.params.get("topic")
- payload = module.params.get("payload")
- client_id = module.params.get("client_id", '')
- qos = int(module.params.get("qos", 0))
- retain = module.params.get("retain")
- username = module.params.get("username", None)
- password = module.params.get("password", None)
- ca_certs = module.params.get("ca_certs", None)
- certfile = module.params.get("certfile", None)
- keyfile = module.params.get("keyfile", None)
+ server = module.params.get("server", 'localhost')
+ port = module.params.get("port", 1883)
+ topic = module.params.get("topic")
+ payload = module.params.get("payload")
+ client_id = module.params.get("client_id", '')
+ qos = int(module.params.get("qos", 0))
+ retain = module.params.get("retain")
+ username = module.params.get("username", None)
+ password = module.params.get("password", None)
+ ca_certs = module.params.get("ca_certs", None)
+ certfile = module.params.get("certfile", None)
+ keyfile = module.params.get("keyfile", None)
if client_id is None:
client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
@@ -182,11 +182,11 @@ def main():
if payload and payload == 'None':
payload = None
- auth=None
+ auth = None
if username is not None:
- auth = { 'username' : username, 'password' : password }
+ auth = {'username': username, 'password': password}
- tls=None
+ tls = None
if ca_certs is not None:
tls = {'ca_certs': ca_certs, 'certfile': certfile,
'keyfile': keyfile}
diff --git a/lib/ansible/modules/notification/osx_say.py b/lib/ansible/modules/notification/osx_say.py
index 6920aea15b..bd9ea5de00 100644
--- a/lib/ansible/modules/notification/osx_say.py
+++ b/lib/ansible/modules/notification/osx_say.py
@@ -48,7 +48,7 @@ import os
from ansible.module_utils.basic import AnsibleModule
-DEFAULT_VOICE='Alex'
+DEFAULT_VOICE = 'Alex'
def say(module, msg, voice):
@@ -68,7 +68,7 @@ def main():
if not os.path.exists("/usr/bin/say"):
module.fail_json(msg="/usr/bin/say is not installed")
- msg = module.params['msg']
+ msg = module.params['msg']
voice = module.params['voice']
say(module, msg, voice)
diff --git a/lib/ansible/modules/notification/pushbullet.py b/lib/ansible/modules/notification/pushbullet.py
index b9ff08dc5a..a7d50c7175 100644
--- a/lib/ansible/modules/notification/pushbullet.py
+++ b/lib/ansible/modules/notification/pushbullet.py
@@ -107,28 +107,28 @@ from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
- argument_spec = dict(
- api_key = dict(type='str', required=True, no_log=True),
- channel = dict(type='str', default=None),
- device = dict(type='str', default=None),
- push_type = dict(type='str', default="note", choices=['note', 'link']),
- title = dict(type='str', required=True),
- body = dict(type='str', default=None),
- url = dict(type='str', default=None),
+ argument_spec=dict(
+ api_key=dict(type='str', required=True, no_log=True),
+ channel=dict(type='str', default=None),
+ device=dict(type='str', default=None),
+ push_type=dict(type='str', default="note", choices=['note', 'link']),
+ title=dict(type='str', required=True),
+ body=dict(type='str', default=None),
+ url=dict(type='str', default=None),
),
- mutually_exclusive = (
+ mutually_exclusive=(
['channel', 'device'],
),
supports_check_mode=True
)
- api_key = module.params['api_key']
- channel = module.params['channel']
- device = module.params['device']
- push_type = module.params['push_type']
- title = module.params['title']
- body = module.params['body']
- url = module.params['url']
+ api_key = module.params['api_key']
+ channel = module.params['channel']
+ device = module.params['device']
+ push_type = module.params['push_type']
+ title = module.params['title']
+ body = module.params['body']
+ url = module.params['url']
if not pushbullet_found:
module.fail_json(msg="Python 'pushbullet.py' module is required. Install via: $ pip install pushbullet.py")
diff --git a/lib/ansible/modules/notification/pushover.py b/lib/ansible/modules/notification/pushover.py
index 8c478c4346..33c775e1d0 100644
--- a/lib/ansible/modules/notification/pushover.py
+++ b/lib/ansible/modules/notification/pushover.py
@@ -73,12 +73,12 @@ class Pushover(object):
# parse config
options = dict(user=self.user,
- token=self.token,
- priority=priority,
- message=msg)
+ token=self.token,
+ priority=priority,
+ message=msg)
data = urlencode(options)
- headers = { "Content-type": "application/x-www-form-urlencoded"}
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
if info['status'] != 200:
raise Exception(info)
@@ -93,7 +93,7 @@ def main():
msg=dict(required=True),
app_token=dict(required=True, no_log=True),
user_key=dict(required=True, no_log=True),
- pri=dict(required=False, default='0', choices=['-2','-1','0','1','2']),
+ pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']),
),
)
diff --git a/lib/ansible/modules/notification/rocketchat.py b/lib/ansible/modules/notification/rocketchat.py
index 49d0b77fa2..420b3258b8 100644
--- a/lib/ansible/modules/notification/rocketchat.py
+++ b/lib/ansible/modules/notification/rocketchat.py
@@ -196,9 +196,10 @@ def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon
attachment['fallback'] = attachment['text']
payload['attachments'].append(attachment)
- payload="payload=" + module.jsonify(payload)
+ payload = "payload=" + module.jsonify(payload)
return payload
+
def do_notify_rocketchat(module, domain, token, protocol, payload):
if token.count('/') < 1:
@@ -210,21 +211,22 @@ def do_notify_rocketchat(module, domain, token, protocol, payload):
if info['status'] != 200:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- domain = dict(type='str', required=True, default=None),
- token = dict(type='str', required=True, no_log=True),
- protocol = dict(type='str', default='https', choices=['http', 'https']),
- msg = dict(type='str', required=False, default=None),
- channel = dict(type='str', default=None),
- username = dict(type='str', default='Ansible'),
- icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'),
- icon_emoji = dict(type='str', default=None),
- link_names = dict(type='int', default=1, choices=[0,1]),
- validate_certs = dict(default='yes', type='bool'),
- color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
- attachments = dict(type='list', required=False, default=None)
+ argument_spec=dict(
+ domain=dict(type='str', required=True, default=None),
+ token=dict(type='str', required=True, no_log=True),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ msg=dict(type='str', required=False, default=None),
+ channel=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str', default=None),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ validate_certs=dict(default='yes', type='bool'),
+ color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments=dict(type='list', required=False, default=None)
)
)
diff --git a/lib/ansible/modules/notification/sendgrid.py b/lib/ansible/modules/notification/sendgrid.py
index 24e48d39fe..88e476a365 100644
--- a/lib/ansible/modules/notification/sendgrid.py
+++ b/lib/ansible/modules/notification/sendgrid.py
@@ -145,14 +145,14 @@ from ansible.module_utils.urls import fetch_url
def post_sendgrid_api(module, username, password, from_address, to_addresses,
- subject, body, api_key=None, cc=None, bcc=None, attachments=None,
- html_body=False, from_name=None, headers=None):
+ subject, body, api_key=None, cc=None, bcc=None, attachments=None,
+ html_body=False, from_name=None, headers=None):
if not HAS_SENDGRID:
SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
AGENT = "Ansible"
- data = {'api_user': username, 'api_key':password,
- 'from':from_address, 'subject': subject, 'text': body}
+ data = {'api_user': username, 'api_key': password,
+ 'from': from_address, 'subject': subject, 'text': body}
encoded_data = urlencode(data)
to_addresses_api = ''
for recipient in to_addresses:
@@ -160,9 +160,9 @@ def post_sendgrid_api(module, username, password, from_address, to_addresses,
to_addresses_api += '&to[]=%s' % recipient
encoded_data += to_addresses_api
- headers = { 'User-Agent': AGENT,
- 'Content-type': 'application/x-www-form-urlencoded',
- 'Accept': 'application/json'}
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json'}
return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
else:
@@ -207,6 +207,7 @@ def post_sendgrid_api(module, username, password, from_address, to_addresses,
# Main
#
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -225,11 +226,11 @@ def main():
attachments=dict(required=False, type='list')
),
supports_check_mode=True,
- mutually_exclusive = [
+ mutually_exclusive=[
['api_key', 'password'],
['api_key', 'username']
- ],
- required_together = [['username', 'password']],
+ ],
+ required_together=[['username', 'password']],
)
username = module.params['username']
@@ -253,8 +254,8 @@ def main():
'api_key, bcc, cc, headers, from_name, html_body, attachments')
response, info = post_sendgrid_api(module, username, password,
- from_address, to_addresses, subject, body, attachments=attachments,
- bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
+ from_address, to_addresses, subject, body, attachments=attachments,
+ bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
if not HAS_SENDGRID:
if info['status'] != 200:
diff --git a/lib/ansible/modules/notification/slack.py b/lib/ansible/modules/notification/slack.py
index 628da6fdfe..c9f7fdedbb 100644
--- a/lib/ansible/modules/notification/slack.py
+++ b/lib/ansible/modules/notification/slack.py
@@ -189,7 +189,7 @@ escape_table = {
def escape_quotes(text):
'''Backslash any quotes within text.'''
- return "".join(escape_table.get(c,c) for c in text)
+ return "".join(escape_table.get(c, c) for c in text)
def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color, attachments):
@@ -203,7 +203,7 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj
if (channel[0] == '#') or (channel[0] == '@'):
payload['channel'] = channel
else:
- payload['channel'] = '#'+channel
+ payload['channel'] = '#' + channel
if username is not None:
payload['username'] = username
if icon_emoji is not None:
@@ -237,9 +237,10 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj
payload['attachments'].append(attachment)
- payload=module.jsonify(payload)
+ payload = module.jsonify(payload)
return payload
+
def do_notify_slack(module, domain, token, payload):
if token.count('/') >= 2:
# New style token
@@ -259,21 +260,22 @@ def do_notify_slack(module, domain, token, payload):
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg']))
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- domain = dict(type='str', required=False, default=None),
- token = dict(type='str', required=True, no_log=True),
- msg = dict(type='str', required=False, default=None),
- channel = dict(type='str', default=None),
- username = dict(type='str', default='Ansible'),
- icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'),
- icon_emoji = dict(type='str', default=None),
- link_names = dict(type='int', default=1, choices=[0,1]),
- parse = dict(type='str', default=None, choices=['none', 'full']),
- validate_certs = dict(default='yes', type='bool'),
- color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
- attachments = dict(type='list', required=False, default=None)
+ argument_spec=dict(
+ domain=dict(type='str', required=False, default=None),
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=False, default=None),
+ channel=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str', default=None),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ parse=dict(type='str', default=None, choices=['none', 'full']),
+ validate_certs=dict(default='yes', type='bool'),
+ color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments=dict(type='list', required=False, default=None)
)
)
diff --git a/lib/ansible/modules/notification/twilio.py b/lib/ansible/modules/notification/twilio.py
index b9d1c2260a..0fb8f59f30 100644
--- a/lib/ansible/modules/notification/twilio.py
+++ b/lib/ansible/modules/notification/twilio.py
@@ -112,15 +112,15 @@ def post_twilio_api(module, account_sid, auth_token, msg, from_number,
% (account_sid,)
AGENT = "Ansible"
- data = {'From':from_number, 'To':to_number, 'Body':msg}
+ data = {'From': from_number, 'To': to_number, 'Body': msg}
if media_url:
data['MediaUrl'] = media_url
encoded_data = urlencode(data)
headers = {'User-Agent': AGENT,
- 'Content-type': 'application/x-www-form-urlencoded',
- 'Accept': 'application/json',
- }
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json',
+ }
# Hack module params to have the Basic auth params that fetch_url expects
module.params['url_username'] = account_sid.replace('\n', '')
@@ -159,7 +159,7 @@ def main():
for number in to_number:
r, info = post_twilio_api(module, account_sid, auth_token, msg,
- from_number, number, media_url)
+ from_number, number, media_url)
if info['status'] not in [200, 201]:
body_message = "unknown error"
if 'body' in info:
diff --git a/lib/ansible/modules/packaging/language/bundler.py b/lib/ansible/modules/packaging/language/bundler.py
index 88013b6ed4..a82394829e 100644
--- a/lib/ansible/modules/packaging/language/bundler.py
+++ b/lib/ansible/modules/packaging/language/bundler.py
@@ -13,7 +13,7 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community'}
-DOCUMENTATION='''
+DOCUMENTATION = '''
---
module: bundler
short_description: Manage Ruby Gem dependencies with Bundler
@@ -104,7 +104,7 @@ options:
author: "Tim Hoiberg (@thoiberg)"
'''
-EXAMPLES='''
+EXAMPLES = '''
# Installs gems from a Gemfile in the current directory
- bundler:
state: present
@@ -159,7 +159,7 @@ def main():
extra_args=dict(default=None, required=False),
),
supports_check_mode=True
- )
+ )
state = module.params.get('state')
chdir = module.params.get('chdir')
diff --git a/lib/ansible/modules/packaging/language/cpanm.py b/lib/ansible/modules/packaging/language/cpanm.py
index 3195775561..fcd22b6812 100644
--- a/lib/ansible/modules/packaging/language/cpanm.py
+++ b/lib/ansible/modules/packaging/language/cpanm.py
@@ -141,6 +141,7 @@ def _is_package_installed(module, name, locallib, cpanm, version):
res, stdout, stderr = module.run_command(cmd, check_rc=False)
return res == 0
+
def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
# this code should use "%s" like everything else and just return early but not fixing all of it now.
# don't copy stuff like this
@@ -197,23 +198,23 @@ def main():
required_one_of=[['name', 'from_path']],
)
- cpanm = _get_cpanm_path(module)
- name = module.params['name']
- from_path = module.params['from_path']
- notest = module.boolean(module.params.get('notest', False))
- locallib = module.params['locallib']
- mirror = module.params['mirror']
+ cpanm = _get_cpanm_path(module)
+ name = module.params['name']
+ from_path = module.params['from_path']
+ notest = module.boolean(module.params.get('notest', False))
+ locallib = module.params['locallib']
+ mirror = module.params['mirror']
mirror_only = module.params['mirror_only']
installdeps = module.params['installdeps']
- use_sudo = module.params['system_lib']
- version = module.params['version']
+ use_sudo = module.params['system_lib']
+ version = module.params['version']
- changed = False
+ changed = False
installed = _is_package_installed(module, name, locallib, cpanm, version)
if not installed:
- cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo)
+ cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo)
rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
diff --git a/lib/ansible/modules/packaging/language/gem.py b/lib/ansible/modules/packaging/language/gem.py
index 900b3ac696..4e27004024 100644
--- a/lib/ansible/modules/packaging/language/gem.py
+++ b/lib/ansible/modules/packaging/language/gem.py
@@ -121,8 +121,9 @@ def get_rubygems_path(module):
result = [module.get_bin_path('gem', True)]
return result
+
def get_rubygems_version(module):
- cmd = get_rubygems_path(module) + [ '--version' ]
+ cmd = get_rubygems_path(module) + ['--version']
(rc, out, err) = module.run_command(cmd, check_rc=True)
match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
@@ -131,6 +132,7 @@ def get_rubygems_version(module):
return tuple(int(x) for x in match.groups())
+
def get_installed_versions(module, remote=False):
cmd = get_rubygems_path(module)
@@ -138,7 +140,7 @@ def get_installed_versions(module, remote=False):
if remote:
cmd.append('--remote')
if module.params['repository']:
- cmd.extend([ '--source', module.params['repository'] ])
+ cmd.extend(['--source', module.params['repository']])
cmd.append('-n')
cmd.append('^%s$' % module.params['name'])
(rc, out, err) = module.run_command(cmd, check_rc=True)
@@ -151,6 +153,7 @@ def get_installed_versions(module, remote=False):
installed_versions.append(version.split()[0])
return installed_versions
+
def exists(module):
if module.params['state'] == 'latest':
@@ -166,6 +169,7 @@ def exists(module):
return True
return False
+
def uninstall(module):
if module.check_mode:
@@ -173,13 +177,14 @@ def uninstall(module):
cmd = get_rubygems_path(module)
cmd.append('uninstall')
if module.params['version']:
- cmd.extend([ '--version', module.params['version'] ])
+ cmd.extend(['--version', module.params['version']])
else:
cmd.append('--all')
cmd.append('--executable')
cmd.append(module.params['name'])
module.run_command(cmd, check_rc=True)
+
def install(module):
if module.check_mode:
@@ -194,9 +199,9 @@ def install(module):
cmd = get_rubygems_path(module)
cmd.append('install')
if module.params['version']:
- cmd.extend([ '--version', module.params['version'] ])
+ cmd.extend(['--version', module.params['version']])
if module.params['repository']:
- cmd.extend([ '--source', module.params['repository'] ])
+ cmd.extend(['--source', module.params['repository']])
if not module.params['include_dependencies']:
cmd.append('--ignore-dependencies')
else:
@@ -218,28 +223,29 @@ def install(module):
cmd.append('--env-shebang')
cmd.append(module.params['gem_source'])
if module.params['build_flags']:
- cmd.extend([ '--', module.params['build_flags'] ])
+ cmd.extend(['--', module.params['build_flags']])
module.run_command(cmd, check_rc=True)
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- executable = dict(required=False, type='path'),
- gem_source = dict(required=False, type='path'),
- include_dependencies = dict(required=False, default=True, type='bool'),
- name = dict(required=True, type='str'),
- repository = dict(required=False, aliases=['source'], type='str'),
- state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'),
- user_install = dict(required=False, default=True, type='bool'),
- pre_release = dict(required=False, default=False, type='bool'),
- include_doc = dict(required=False, default=False, type='bool'),
- env_shebang = dict(required=False, default=False, type='bool'),
- version = dict(required=False, type='str'),
- build_flags = dict(required=False, type='str'),
+ argument_spec=dict(
+ executable=dict(required=False, type='path'),
+ gem_source=dict(required=False, type='path'),
+ include_dependencies=dict(required=False, default=True, type='bool'),
+ name=dict(required=True, type='str'),
+ repository=dict(required=False, aliases=['source'], type='str'),
+ state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
+ user_install=dict(required=False, default=True, type='bool'),
+ pre_release=dict(required=False, default=False, type='bool'),
+ include_doc=dict(required=False, default=False, type='bool'),
+ env_shebang=dict(required=False, default=False, type='bool'),
+ version=dict(required=False, type='str'),
+ build_flags=dict(required=False, type='str'),
),
- supports_check_mode = True,
- mutually_exclusive = [ ['gem_source','repository'], ['gem_source','version'] ],
+ supports_check_mode=True,
+ mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
)
if module.params['version'] and module.params['state'] == 'latest':
@@ -252,7 +258,7 @@ def main():
changed = False
- if module.params['state'] in [ 'present', 'latest']:
+ if module.params['state'] in ['present', 'latest']:
if not exists(module):
install(module)
changed = True
diff --git a/lib/ansible/modules/packaging/language/maven_artifact.py b/lib/ansible/modules/packaging/language/maven_artifact.py
index 540b63a458..2a8cd8bbc2 100644
--- a/lib/ansible/modules/packaging/language/maven_artifact.py
+++ b/lib/ansible/modules/packaging/language/maven_artifact.py
@@ -315,12 +315,12 @@ class MavenDownloader:
def _request(self, url, failmsg, f):
url_to_use = url
parsed_url = urlparse(url)
- if parsed_url.scheme=='s3':
+ if parsed_url.scheme == 's3':
parsed_url = urlparse(url)
bucket_name = parsed_url.netloc
key_name = parsed_url.path[1:]
- client = boto3.client('s3',aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
- url_to_use = client.generate_presigned_url('get_object',Params={'Bucket':bucket_name,'Key':key_name},ExpiresIn=10)
+ client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+ url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
req_timeout = self.module.params.get('timeout')
@@ -335,7 +335,6 @@ class MavenDownloader:
else:
return f(response)
-
def download(self, artifact, filename=None):
filename = artifact.get_filename(filename)
if not artifact.version or artifact.version == "latest":
@@ -402,20 +401,20 @@ class MavenDownloader:
def main():
module = AnsibleModule(
- argument_spec = dict(
- group_id = dict(default=None),
- artifact_id = dict(default=None),
- version = dict(default="latest"),
- classifier = dict(default=''),
- extension = dict(default='jar'),
- repository_url = dict(default=None),
- username = dict(default=None,aliases=['aws_secret_key']),
- password = dict(default=None, no_log=True,aliases=['aws_secret_access_key']),
- state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state
- timeout = dict(default=10, type='int'),
- dest = dict(type="path", default=None),
- validate_certs = dict(required=False, default=True, type='bool'),
- keep_name = dict(required=False, default=False, type='bool'),
+ argument_spec=dict(
+ group_id=dict(default=None),
+ artifact_id=dict(default=None),
+ version=dict(default="latest"),
+ classifier=dict(default=''),
+ extension=dict(default='jar'),
+ repository_url=dict(default=None),
+ username=dict(default=None, aliases=['aws_secret_key']),
+ password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
+ state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state
+ timeout=dict(default=10, type='int'),
+ dest=dict(type="path", default=None),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ keep_name=dict(required=False, default=False, type='bool'),
),
add_file_common_args=True
)
@@ -429,7 +428,7 @@ def main():
except AttributeError as e:
module.fail_json(msg='url parsing went wrong %s' % e)
- if parsed_url.scheme=='s3' and not HAS_BOTO:
+ if parsed_url.scheme == 's3' and not HAS_BOTO:
module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs')
group_id = module.params["group_id"]
diff --git a/lib/ansible/modules/packaging/language/pear.py b/lib/ansible/modules/packaging/language/pear.py
index a4b738d170..0d0f19a8db 100644
--- a/lib/ansible/modules/packaging/language/pear.py
+++ b/lib/ansible/modules/packaging/language/pear.py
@@ -83,6 +83,7 @@ def get_local_version(pear_output):
return installed
return None
+
def _get_pear_path(module):
if module.params['executable'] and os.path.isfile(module.params['executable']):
result = module.params['executable']
@@ -90,6 +91,7 @@ def _get_pear_path(module):
result = module.get_bin_path('pear', True, [module.params['executable']])
return result
+
def get_repository_version(pear_output):
"""Take pear remote-info output and get the latest version"""
lines = pear_output.split('\n')
@@ -98,6 +100,7 @@ def get_repository_version(pear_output):
return line.rsplit(None, 1)[-1].strip()
return None
+
def query_package(module, name, state="present"):
"""Query the package status in both the local system and the repository.
Returns a boolean to indicate if the package is installed,
@@ -198,18 +201,14 @@ def check_packages(module, packages, state):
module.exit_json(change=False, msg="package(s) already %s" % state)
-
-
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(aliases=['pkg']),
- state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
- executable = dict(default=None, required=False, type='path')),
- required_one_of = [['name']],
- supports_check_mode = True)
-
-
+ argument_spec=dict(
+ name=dict(aliases=['pkg']),
+ state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
+ executable=dict(default=None, required=False, type='path')),
+ required_one_of=[['name']],
+ supports_check_mode=True)
p = module.params
diff --git a/lib/ansible/modules/packaging/os/apk.py b/lib/ansible/modules/packaging/os/apk.py
index 0d1e1f73f2..8015abe642 100644
--- a/lib/ansible/modules/packaging/os/apk.py
+++ b/lib/ansible/modules/packaging/os/apk.py
@@ -147,6 +147,7 @@ import re
# Import module snippets.
from ansible.module_utils.basic import AnsibleModule
+
def parse_for_packages(stdout):
packages = []
data = stdout.split('\n')
@@ -157,6 +158,7 @@ def parse_for_packages(stdout):
packages.append(p.group(1))
return packages
+
def update_package_db(module, exit):
cmd = "%s update" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
@@ -167,6 +169,7 @@ def update_package_db(module, exit):
else:
return True
+
def query_package(module, name):
cmd = "%s -v info --installed %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
@@ -175,6 +178,7 @@ def query_package(module, name):
else:
return False
+
def query_latest(module, name):
cmd = "%s version %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
@@ -184,6 +188,7 @@ def query_latest(module, name):
return False
return True
+
def query_virtual(module, name):
cmd = "%s -v info --description %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
@@ -192,6 +197,7 @@ def query_virtual(module, name):
return True
return False
+
def get_dependencies(module, name):
cmd = "%s -v info --depends %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
@@ -201,6 +207,7 @@ def get_dependencies(module, name):
else:
return []
+
def upgrade_packages(module, available):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
@@ -216,6 +223,7 @@ def upgrade_packages(module, available):
module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
+
def install_packages(module, names, state):
upgrade = False
to_install = []
@@ -254,6 +262,7 @@ def install_packages(module, names, state):
module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+
def remove_packages(module, names):
installed = []
for name in names:
@@ -275,6 +284,7 @@ def remove_packages(module, names):
# ==========================================
# Main control flow.
+
def main():
module = AnsibleModule(
argument_spec=dict(
diff --git a/lib/ansible/modules/packaging/os/dpkg_selections.py b/lib/ansible/modules/packaging/os/dpkg_selections.py
index 5a264672b8..b4f1f4f893 100644
--- a/lib/ansible/modules/packaging/os/dpkg_selections.py
+++ b/lib/ansible/modules/packaging/os/dpkg_selections.py
@@ -41,6 +41,7 @@ EXAMPLES = '''
selection: hold
'''
+
def main():
module = AnsibleModule(
argument_spec=dict(
diff --git a/lib/ansible/modules/packaging/os/homebrew.py b/lib/ansible/modules/packaging/os/homebrew.py
index 5861eaed96..ba7a039ccd 100644
--- a/lib/ansible/modules/packaging/os/homebrew.py
+++ b/lib/ansible/modules/packaging/os/homebrew.py
@@ -181,9 +181,9 @@ class Homebrew(object):
@ # at-sign
'''
- INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
- INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
- INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS)
+ INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
+ INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS)
# /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
diff --git a/lib/ansible/modules/packaging/os/homebrew_cask.py b/lib/ansible/modules/packaging/os/homebrew_cask.py
index 7e43be37b0..0d3ebd590b 100644
--- a/lib/ansible/modules/packaging/os/homebrew_cask.py
+++ b/lib/ansible/modules/packaging/os/homebrew_cask.py
@@ -137,9 +137,9 @@ class HomebrewCask(object):
- # dashes
'''
- INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
- INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
- INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS)
+ INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
+ INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS)
# /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
@@ -589,7 +589,7 @@ def main():
for install_option in p['install_options']]
brew_cask = HomebrewCask(module=module, path=path, casks=casks,
- state=state, update_homebrew=update_homebrew,
+ state=state, update_homebrew=update_homebrew,
install_options=install_options)
(failed, changed, message) = brew_cask.run()
if failed:
diff --git a/lib/ansible/modules/packaging/os/layman.py b/lib/ansible/modules/packaging/os/layman.py
index ef84797be9..7cdf1833e7 100644
--- a/lib/ansible/modules/packaging/os/layman.py
+++ b/lib/ansible/modules/packaging/os/layman.py
@@ -158,8 +158,8 @@ def install_overlay(module, name, list_url=None):
if not layman.is_repo(name):
if not list_url:
- raise ModuleError("Overlay '%s' is not on the list of known " \
- "overlays and URL of the remote list was not provided." % name)
+ raise ModuleError("Overlay '%s' is not on the list of known "
+ "overlays and URL of the remote list was not provided." % name)
overlay_defs = layman_conf.get_option('overlay_defs')
dest = path.join(overlay_defs, name + '.xml')
@@ -209,7 +209,7 @@ def sync_overlay(name):
layman = init_layman()
if not layman.sync(name):
- messages = [ str(item[1]) for item in layman.sync_results[2] ]
+ messages = [str(item[1]) for item in layman.sync_results[2]]
raise ModuleError(messages)
@@ -227,11 +227,11 @@ def sync_overlays():
def main():
# define module
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True),
- list_url = dict(aliases=['url']),
- state = dict(default="present", choices=['present', 'absent', 'updated']),
- validate_certs = dict(required=False, default=True, type='bool'),
+ argument_spec=dict(
+ name=dict(required=True),
+ list_url=dict(aliases=['url']),
+ state=dict(default="present", choices=['present', 'absent', 'updated']),
+ validate_certs=dict(required=False, default=True, type='bool'),
),
supports_check_mode=True
)
diff --git a/lib/ansible/modules/packaging/os/macports.py b/lib/ansible/modules/packaging/os/macports.py
index 34a5683124..fc21ac5ff9 100644
--- a/lib/ansible/modules/packaging/os/macports.py
+++ b/lib/ansible/modules/packaging/os/macports.py
@@ -68,6 +68,7 @@ EXAMPLES = '''
import pipes
+
def update_package_db(module, port_path):
""" Updates packages list. """
@@ -196,10 +197,10 @@ def deactivate_packages(module, port_path, packages):
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(aliases=["pkg"], required=True),
- state = dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
- update_cache = dict(default="no", aliases=["update-cache"], type='bool')
+ argument_spec=dict(
+ name=dict(aliases=["pkg"], required=True),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
+ update_cache=dict(default="no", aliases=["update-cache"], type='bool')
)
)
diff --git a/lib/ansible/modules/packaging/os/opkg.py b/lib/ansible/modules/packaging/os/opkg.py
index 9933cc807c..7fbe78702c 100644
--- a/lib/ansible/modules/packaging/os/opkg.py
+++ b/lib/ansible/modules/packaging/os/opkg.py
@@ -89,6 +89,7 @@ EXAMPLES = '''
import pipes
+
def update_package_db(module, opkg_path):
""" Updates packages list. """
diff --git a/lib/ansible/modules/packaging/os/pkgin.py b/lib/ansible/modules/packaging/os/pkgin.py
index d9eb723c64..3bccaeb677 100644
--- a/lib/ansible/modules/packaging/os/pkgin.py
+++ b/lib/ansible/modules/packaging/os/pkgin.py
@@ -132,6 +132,7 @@ EXAMPLES = '''
import re
+
def query_package(module, name):
"""Search for the package by name.
@@ -201,8 +202,8 @@ def query_package(module, name):
def format_action_message(module, action, count):
- vars = { "actioned": action,
- "count": count }
+ vars = {"actioned": action,
+ "count": count}
if module.check_mode:
message = "would have %(actioned)s %(count)d package" % vars
@@ -227,10 +228,10 @@ def format_pkgin_command(module, command, package=None):
else:
force = ""
- vars = { "pkgin": PKGIN_PATH,
- "command": command,
- "package": package,
- "force": force}
+ vars = {"pkgin": PKGIN_PATH,
+ "command": command,
+ "package": package,
+ "force": force}
if module.check_mode:
return "%(pkgin)s -n %(command)s %(package)s" % vars
@@ -283,6 +284,7 @@ def install_packages(module, packages):
module.exit_json(changed=False, msg="package(s) already present")
+
def update_package_db(module):
rc, out, err = module.run_command(
format_pkgin_command(module, "update"))
@@ -295,6 +297,7 @@ def update_package_db(module):
else:
module.fail_json(msg="could not update package db")
+
def do_upgrade_packages(module, full=False):
if full:
cmd = "full-upgrade"
@@ -310,12 +313,15 @@ def do_upgrade_packages(module, full=False):
else:
module.fail_json(msg="could not %s packages" % cmd)
+
def upgrade_packages(module):
do_upgrade_packages(module)
+
def full_upgrade_packages(module):
do_upgrade_packages(module, True)
+
def clean_cache(module):
rc, out, err = module.run_command(
format_pkgin_command(module, "clean"))
@@ -327,18 +333,19 @@ def clean_cache(module):
else:
module.fail_json(msg="could not clean package cache")
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- state = dict(default="present", choices=["present","absent"]),
- name = dict(aliases=["pkg"], type='list'),
- update_cache = dict(default='no', type='bool'),
- upgrade = dict(default='no', type='bool'),
- full_upgrade = dict(default='no', type='bool'),
- clean = dict(default='no', type='bool'),
- force = dict(default='no', type='bool')),
- required_one_of = [['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
- supports_check_mode = True)
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], type='list'),
+ update_cache=dict(default='no', type='bool'),
+ upgrade=dict(default='no', type='bool'),
+ full_upgrade=dict(default='no', type='bool'),
+ clean=dict(default='no', type='bool'),
+ force=dict(default='no', type='bool')),
+ required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
+ supports_check_mode=True)
global PKGIN_PATH
PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
diff --git a/lib/ansible/modules/packaging/os/pkgng.py b/lib/ansible/modules/packaging/os/pkgng.py
index 2f2f7ec8c1..a6e80ca9d0 100644
--- a/lib/ansible/modules/packaging/os/pkgng.py
+++ b/lib/ansible/modules/packaging/os/pkgng.py
@@ -111,6 +111,7 @@ EXAMPLES = '''
import re
from ansible.module_utils.basic import AnsibleModule
+
def query_package(module, pkgng_path, name, dir_arg):
rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
@@ -120,6 +121,7 @@ def query_package(module, pkgng_path, name, dir_arg):
return False
+
def pkgng_older_than(module, pkgng_path, compare_version):
rc, out, err = module.run_command("%s -v" % pkgng_path)
@@ -206,6 +208,7 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg):
return (False, "package(s) already present")
+
def annotation_query(module, pkgng_path, package, tag, dir_arg):
rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
@@ -219,10 +222,10 @@ def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
if not _value:
# Annotation does not exist, add it.
rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
- % (pkgng_path, dir_arg, package, tag, value))
+ % (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
module.fail_json(msg="could not annotate %s: %s"
- % (package, out), stderr=err)
+ % (package, out), stderr=err)
return True
elif _value != value:
# Annotation exists, but value differs
@@ -234,41 +237,43 @@ def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
# Annotation exists, nothing to do
return False
+
def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if _value:
rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
- % (pkgng_path, dir_arg, package, tag))
+ % (pkgng_path, dir_arg, package, tag))
if rc != 0:
module.fail_json(msg="could not delete annotation to %s: %s"
- % (package, out), stderr=err)
+ % (package, out), stderr=err)
return True
return False
+
def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if not value:
# No such tag
module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
- % (package, tag))
+ % (package, tag))
elif _value == value:
# No change in value
return False
else:
- rc,out,err = module.run_command('%s %s annotate -y -M %s %s "%s"'
- % (pkgng_path, dir_arg, package, tag, value))
+ rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
module.fail_json(msg="could not change annotation annotation to %s: %s"
- % (package, out), stderr=err)
+ % (package, out), stderr=err)
return True
def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
annotate_c = 0
annotations = map(lambda _annotation:
- re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
- _annotation).groupdict(),
- re.split(r',', annotation))
+ re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+ _annotation).groupdict(),
+ re.split(r',', annotation))
operation = {
'+': annotation_add,
@@ -285,6 +290,7 @@ def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
return (True, "added %s annotations." % annotate_c)
return (False, "changed no annotations")
+
def autoremove_packages(module, pkgng_path, dir_arg):
rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
@@ -302,20 +308,21 @@ def autoremove_packages(module, pkgng_path, dir_arg):
return True, "autoremoved %d package(s)" % (autoremove_c)
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- state = dict(default="present", choices=["present","absent"], required=False),
- name = dict(aliases=["pkg"], required=True, type='list'),
- cached = dict(default=False, type='bool'),
- annotation = dict(default="", required=False),
- pkgsite = dict(default="", required=False),
- rootdir = dict(default="", required=False, type='path'),
- chroot = dict(default="", required=False, type='path'),
- jail = dict(default="", required=False, type='str'),
- autoremove = dict(default=False, type='bool')),
- supports_check_mode = True,
- mutually_exclusive =[["rootdir", "chroot", "jail"]])
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"], required=False),
+ name=dict(aliases=["pkg"], required=True, type='list'),
+ cached=dict(default=False, type='bool'),
+ annotation=dict(default="", required=False),
+ pkgsite=dict(default="", required=False),
+ rootdir=dict(default="", required=False, type='path'),
+ chroot=dict(default="", required=False, type='path'),
+ jail=dict(default="", required=False, type='str'),
+ autoremove=dict(default=False, type='bool')),
+ supports_check_mode=True,
+ mutually_exclusive=[["rootdir", "chroot", "jail"]])
pkgng_path = module.get_bin_path('pkg', True)
diff --git a/lib/ansible/modules/packaging/os/pkgutil.py b/lib/ansible/modules/packaging/os/pkgutil.py
index 0a3d054ef5..e6fbeb16a7 100644
--- a/lib/ansible/modules/packaging/os/pkgutil.py
+++ b/lib/ansible/modules/packaging/os/pkgutil.py
@@ -69,6 +69,7 @@ EXAMPLES = '''
import os
import pipes
+
def package_installed(module, name):
cmd = ['pkginfo']
cmd.append('-q')
@@ -79,11 +80,12 @@ def package_installed(module, name):
else:
return False
+
def package_latest(module, name, site):
# Only supports one package
- cmd = [ 'pkgutil', '-U', '--single', '-c' ]
+ cmd = ['pkgutil', '-U', '--single', '-c']
if site is not None:
- cmd += [ '-t', site]
+ cmd += ['-t', site]
cmd.append(name)
rc, out, err = run_command(module, cmd)
# replace | tail -1 |grep -v SAME
@@ -91,45 +93,50 @@ def package_latest(module, name, site):
# at the end of the list
return 'SAME' in out.split('\n')[-2]
+
def run_command(module, cmd, **kwargs):
progname = cmd[0]
cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
return module.run_command(cmd, **kwargs)
+
def package_install(module, state, name, site, update_catalog):
- cmd = [ 'pkgutil', '-iy' ]
+ cmd = ['pkgutil', '-iy']
if update_catalog:
- cmd += [ '-U' ]
+ cmd += ['-U']
if site is not None:
- cmd += [ '-t', site ]
+ cmd += ['-t', site]
if state == 'latest':
- cmd += [ '-f' ]
+ cmd += ['-f']
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
return (rc, out, err)
+
def package_upgrade(module, name, site, update_catalog):
- cmd = [ 'pkgutil', '-ufy' ]
+ cmd = ['pkgutil', '-ufy']
if update_catalog:
- cmd += [ '-U' ]
+ cmd += ['-U']
if site is not None:
- cmd += [ '-t', site ]
+ cmd += ['-t', site]
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
return (rc, out, err)
+
def package_uninstall(module, name):
- cmd = [ 'pkgutil', '-ry', name]
+ cmd = ['pkgutil', '-ry', name]
(rc, out, err) = run_command(module, cmd)
return (rc, out, err)
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required = True),
- state = dict(required = True, choices=['present', 'absent','latest']),
- site = dict(default = None),
- update_catalog = dict(required = False, default = False, type='bool'),
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=True, choices=['present', 'absent', 'latest']),
+ site=dict(default=None),
+ update_catalog=dict(required=False, default=False, type='bool'),
),
supports_check_mode=True
)
diff --git a/lib/ansible/modules/packaging/os/portage.py b/lib/ansible/modules/packaging/os/portage.py
index bc4f06bdc5..0cded3b76a 100644
--- a/lib/ansible/modules/packaging/os/portage.py
+++ b/lib/ansible/modules/packaging/os/portage.py
@@ -289,7 +289,7 @@ def sync_repositories(module, webrsync=False):
def emerge_packages(module, packages):
p = module.params
- if not (p['update'] or p['noreplace'] or p['state']=='latest'):
+ if not (p['update'] or p['noreplace'] or p['state'] == 'latest'):
for package in packages:
if not query_package(module, package, 'emerge'):
break
@@ -319,7 +319,7 @@ def emerge_packages(module, packages):
if p[flag]:
args.append(arg)
- if p['state'] and p['state']=='latest':
+ if p['state'] and p['state'] == 'latest':
args.append("--update")
if p['usepkg'] and p['usepkgonly']:
diff --git a/lib/ansible/modules/packaging/os/portinstall.py b/lib/ansible/modules/packaging/os/portinstall.py
index d937ed2b2d..fd099fe522 100644
--- a/lib/ansible/modules/packaging/os/portinstall.py
+++ b/lib/ansible/modules/packaging/os/portinstall.py
@@ -138,7 +138,7 @@ def remove_packages(module, packages):
name_without_digits = re.sub('[0-9]', '', package)
rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path,
shlex_quote(name_without_digits)),
- use_unsafe_shell=True)
+ use_unsafe_shell=True)
if query_package(module, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
@@ -164,9 +164,9 @@ def install_packages(module, packages, use_packages):
portinstall_path = module.get_bin_path('portinstall', True)
if use_packages == "yes":
- portinstall_params="--use-packages"
+ portinstall_params = "--use-packages"
else:
- portinstall_params=""
+ portinstall_params = ""
for package in packages:
if query_package(module, package):
@@ -193,10 +193,10 @@ def install_packages(module, packages, use_packages):
def main():
module = AnsibleModule(
- argument_spec = dict(
- state = dict(default="present", choices=["present","absent"]),
- name = dict(aliases=["pkg"], required=True),
- use_packages = dict(type='bool', default='yes')))
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], required=True),
+ use_packages=dict(type='bool', default='yes')))
p = module.params
diff --git a/lib/ansible/modules/packaging/os/slackpkg.py b/lib/ansible/modules/packaging/os/slackpkg.py
index ad65ee50d6..20a22843be 100644
--- a/lib/ansible/modules/packaging/os/slackpkg.py
+++ b/lib/ansible/modules/packaging/os/slackpkg.py
@@ -96,7 +96,7 @@ def remove_packages(module, slackpkg_path, packages):
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
remove %s" % (slackpkg_path,
- package))
+ package))
if not module.check_mode and query_package(module, slackpkg_path,
package):
@@ -122,7 +122,7 @@ def install_packages(module, slackpkg_path, packages):
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
install %s" % (slackpkg_path,
- package))
+ package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
@@ -145,7 +145,7 @@ def upgrade_packages(module, slackpkg_path, packages):
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
upgrade %s" % (slackpkg_path,
- package))
+ package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
diff --git a/lib/ansible/modules/packaging/os/sorcery.py b/lib/ansible/modules/packaging/os/sorcery.py
index a148612123..26f37348b7 100644
--- a/lib/ansible/modules/packaging/os/sorcery.py
+++ b/lib/ansible/modules/packaging/os/sorcery.py
@@ -607,17 +607,17 @@ def manage_spells(module):
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(default=None, aliases=['spell'], type='list'),
- state = dict(default='present', choices=['present', 'latest',
- 'absent', 'cast', 'dispelled', 'rebuild']),
- depends = dict(default=None),
- update = dict(default=False, type='bool'),
- update_cache = dict(default=False, aliases=['update_codex'], type='bool'),
- cache_valid_time = dict(default=0, type='int')
+ argument_spec=dict(
+ name=dict(default=None, aliases=['spell'], type='list'),
+ state=dict(default='present', choices=['present', 'latest',
+ 'absent', 'cast', 'dispelled', 'rebuild']),
+ depends=dict(default=None),
+ update=dict(default=False, type='bool'),
+ update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
+ cache_valid_time=dict(default=0, type='int')
),
- required_one_of = [['name', 'update', 'update_cache']],
- supports_check_mode = True
+ required_one_of=[['name', 'update', 'update_cache']],
+ supports_check_mode=True
)
if os.geteuid() != 0:
diff --git a/lib/ansible/modules/packaging/os/svr4pkg.py b/lib/ansible/modules/packaging/os/svr4pkg.py
index b8bcc68400..e4b7d31630 100644
--- a/lib/ansible/modules/packaging/os/svr4pkg.py
+++ b/lib/ansible/modules/packaging/os/svr4pkg.py
@@ -105,6 +105,7 @@ EXAMPLES = '''
import os
import tempfile
+
def package_installed(module, name, category):
cmd = [module.get_bin_path('pkginfo', True)]
cmd.append('-q')
@@ -117,6 +118,7 @@ def package_installed(module, name, category):
else:
return False
+
def create_admin_file():
(desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
fullauto = '''
@@ -141,48 +143,52 @@ basedir=default
os.close(desc)
return filename
+
def run_command(module, cmd):
progname = cmd[0]
cmd[0] = module.get_bin_path(progname, True)
return module.run_command(cmd)
+
def package_install(module, name, src, proxy, response_file, zone, category):
adminfile = create_admin_file()
- cmd = [ 'pkgadd', '-n']
+ cmd = ['pkgadd', '-n']
if zone == 'current':
- cmd += [ '-G' ]
- cmd += [ '-a', adminfile, '-d', src ]
+ cmd += ['-G']
+ cmd += ['-a', adminfile, '-d', src]
if proxy is not None:
- cmd += [ '-x', proxy ]
+ cmd += ['-x', proxy]
if response_file is not None:
- cmd += [ '-r', response_file ]
+ cmd += ['-r', response_file]
if category:
- cmd += [ '-Y' ]
+ cmd += ['-Y']
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
+
def package_uninstall(module, name, src, category):
adminfile = create_admin_file()
if category:
- cmd = [ 'pkgrm', '-na', adminfile, '-Y', name ]
+ cmd = ['pkgrm', '-na', adminfile, '-Y', name]
else:
- cmd = [ 'pkgrm', '-na', adminfile, name]
+ cmd = ['pkgrm', '-na', adminfile, name]
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required = True),
- state = dict(required = True, choices=['present', 'absent']),
- src = dict(default = None),
- proxy = dict(default = None),
- response_file = dict(default = None),
- zone = dict(required=False, default = 'all', choices=['current','all']),
- category = dict(default=False, type='bool')
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ src=dict(default=None),
+ proxy=dict(default=None),
+ response_file=dict(default=None),
+ zone=dict(required=False, default='all', choices=['current', 'all']),
+ category=dict(default=False, type='bool')
),
supports_check_mode=True
)
diff --git a/lib/ansible/modules/packaging/os/swdepot.py b/lib/ansible/modules/packaging/os/swdepot.py
index 8b9785202a..f32e43c7e5 100644
--- a/lib/ansible/modules/packaging/os/swdepot.py
+++ b/lib/ansible/modules/packaging/os/swdepot.py
@@ -108,6 +108,7 @@ def query_package(module, name, depot=None):
return rc, version
+
def remove_package(module, name):
""" Uninstall package if installed. """
@@ -119,6 +120,7 @@ def remove_package(module, name):
else:
return rc, stderr
+
def install_package(module, depot, name):
""" Install package if not already installed """
@@ -129,12 +131,13 @@ def install_package(module, depot, name):
else:
return rc, stderr
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(aliases=['pkg'], required=True),
- state = dict(choices=['present', 'absent', 'latest'], required=True),
- depot = dict(default=None, required=False)
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(choices=['present', 'absent', 'latest'], required=True),
+ depot=dict(default=None, required=False)
),
supports_check_mode=True
)
@@ -145,12 +148,11 @@ def main():
changed = False
msg = "No changed"
rc = 0
- if ( state == 'present' or state == 'latest' ) and depot is None:
+ if (state == 'present' or state == 'latest') and depot is None:
output = "depot parameter is mandatory in present or latest task"
module.fail_json(name=name, msg=output, rc=rc)
-
- #Check local version
+ # Check local version
rc, version_installed = query_package(module, name)
if not rc:
installed = True
@@ -159,7 +161,7 @@ def main():
else:
installed = False
- if ( state == 'present' or state == 'latest' ) and installed is False:
+ if (state == 'present' or state == 'latest') and installed is False:
if module.check_mode:
module.exit_json(changed=True)
rc, output = install_package(module, depot, name)
@@ -172,14 +174,14 @@ def main():
module.fail_json(name=name, msg=output, rc=rc)
elif state == 'latest' and installed is True:
- #Check depot version
+ # Check depot version
rc, version_depot = query_package(module, name, depot)
if not rc:
- if compare_package(version_installed,version_depot) == -1:
+ if compare_package(version_installed, version_depot) == -1:
if module.check_mode:
module.exit_json(changed=True)
- #Install new version
+ # Install new version
rc, output = install_package(module, depot, name)
if not rc:
diff --git a/lib/ansible/modules/packaging/os/zypper.py b/lib/ansible/modules/packaging/os/zypper.py
index 99d00a0d77..15281e0bce 100644
--- a/lib/ansible/modules/packaging/os/zypper.py
+++ b/lib/ansible/modules/packaging/os/zypper.py
@@ -204,7 +204,6 @@ class Package:
return self.prefix + self.name + self.version
-
def split_name_version(name):
"""splits of the package name and desired version
@@ -301,7 +300,7 @@ def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
return packages, rc, stdout, stderr
- m.fail_json(msg='Zypper run command failed with return code %s.'%rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
def get_cmd(m, subcommand):
@@ -455,20 +454,21 @@ def repo_refresh(m):
# ===========================================
# Main control flow
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True, aliases=['pkg'], type='list'),
- state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
- type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
- disable_gpg_check = dict(required=False, default='no', type='bool'),
- disable_recommends = dict(required=False, default='yes', type='bool'),
- force = dict(required=False, default='no', type='bool'),
- update_cache = dict(required=False, aliases=['refresh'], default='no', type='bool'),
- oldpackage = dict(required=False, default='no', type='bool'),
- extra_args = dict(required=False, default=None),
+ argument_spec=dict(
+ name=dict(required=True, aliases=['pkg'], type='list'),
+ state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
+ type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ disable_gpg_check=dict(required=False, default='no', type='bool'),
+ disable_recommends=dict(required=False, default='yes', type='bool'),
+ force=dict(required=False, default='no', type='bool'),
+ update_cache=dict(required=False, aliases=['refresh'], default='no', type='bool'),
+ oldpackage=dict(required=False, default='no', type='bool'),
+ extra_args=dict(required=False, default=None),
),
- supports_check_mode = True
+ supports_check_mode=True
)
name = module.params['name']
diff --git a/lib/ansible/modules/packaging/os/zypper_repository.py b/lib/ansible/modules/packaging/os/zypper_repository.py
index 7ab1afdc61..ca1b04bc10 100644
--- a/lib/ansible/modules/packaging/os/zypper_repository.py
+++ b/lib/ansible/modules/packaging/os/zypper_repository.py
@@ -146,6 +146,7 @@ REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
from distutils.version import LooseVersion
+
def _get_cmd(*args):
"""Combines the non-interactive zypper command with arguments/subcommands"""
cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
@@ -178,6 +179,7 @@ def _parse_repos(module):
else:
module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+
def _repo_changes(realrepo, repocmp):
"Check whether the 2 given repos have different settings."
for k in repocmp:
@@ -194,6 +196,7 @@ def _repo_changes(realrepo, repocmp):
return True
return False
+
def repo_exists(module, repodata, overwrite_multiple):
"""Check whether the repository already exists.
@@ -288,6 +291,7 @@ def get_zypper_version(module):
return LooseVersion('1.0')
return LooseVersion(stdout.split()[1])
+
def runrefreshrepo(module, auto_import_keys=False, shortname=None):
"Forces zypper to refresh repo metadata."
if auto_import_keys:
@@ -309,15 +313,15 @@ def main():
state=dict(choices=['present', 'absent'], default='present'),
runrefresh=dict(required=False, default='no', type='bool'),
description=dict(required=False),
- disable_gpg_check = dict(required=False, default=False, type='bool'),
- autorefresh = dict(required=False, default=True, type='bool', aliases=['refresh']),
- priority = dict(required=False, type='int'),
- enabled = dict(required=False, default=True, type='bool'),
- overwrite_multiple = dict(required=False, default=False, type='bool'),
- auto_import_keys = dict(required=False, default=False, type='bool'),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
+ priority=dict(required=False, type='int'),
+ enabled=dict(required=False, default=True, type='bool'),
+ overwrite_multiple=dict(required=False, default=False, type='bool'),
+ auto_import_keys=dict(required=False, default=False, type='bool'),
),
supports_check_mode=False,
- required_one_of = [['state','runrefresh']],
+ required_one_of=[['state', 'runrefresh']],
)
repo = module.params['repo']
diff --git a/lib/ansible/modules/storage/infinidat/infini_export.py b/lib/ansible/modules/storage/infinidat/infini_export.py
index 827c353163..7edb89a865 100644
--- a/lib/ansible/modules/storage/infinidat/infini_export.py
+++ b/lib/ansible/modules/storage/infinidat/infini_export.py
@@ -158,10 +158,10 @@ def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- name = dict(required=True),
- state = dict(default='present', choices=['present', 'absent']),
- filesystem = dict(required=True),
- client_list = dict(type='list')
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ filesystem=dict(required=True),
+ client_list=dict(type='list')
)
)
@@ -172,10 +172,10 @@ def main():
if not HAS_MUNCH:
module.fail_json(msg='the python munch library is required for this module')
- state      = module.params['state']
- system     = get_system(module)
+ state = module.params['state']
+ system = get_system(module)
filesystem = get_filesystem(module, system)
- export     = get_export(module, filesystem, system)
+ export = get_export(module, filesystem, system)
if filesystem is None:
module.fail_json(msg='Filesystem {} not found'.format(module.params['filesystem']))
diff --git a/lib/ansible/modules/storage/infinidat/infini_export_client.py b/lib/ansible/modules/storage/infinidat/infini_export_client.py
index dba9b2de0c..80f0a0f479 100644
--- a/lib/ansible/modules/storage/infinidat/infini_export_client.py
+++ b/lib/ansible/modules/storage/infinidat/infini_export_client.py
@@ -116,11 +116,11 @@ def update_client(module, export):
changed = False
- client         = module.params['client']
- access_mode    = module.params['access_mode']
+ client = module.params['client']
+ access_mode = module.params['access_mode']
no_root_squash = module.params['no_root_squash']
- client_list    = export.get_permissions()
+ client_list = export.get_permissions()
client_not_in_list = True
for index, item in enumerate(client_list):
@@ -154,7 +154,7 @@ def delete_client(module, export):
changed = False
- client      = module.params['client']
+ client = module.params['client']
client_list = export.get_permissions()
for index, item in enumerate(client_list):
@@ -175,11 +175,11 @@ def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- client = dict(required=True),
- access_mode = dict(choices=['RO', 'RW'], default='RW'),
- no_root_squash = dict(type='bool', default='no'),
- state = dict(default='present', choices=['present', 'absent']),
- export = dict(required=True)
+ client=dict(required=True),
+ access_mode=dict(choices=['RO', 'RW'], default='RW'),
+ no_root_squash=dict(type='bool', default='no'),
+ state=dict(default='present', choices=['present', 'absent']),
+ export=dict(required=True)
)
)
@@ -190,8 +190,8 @@ def main():
if not HAS_MUNCH:
module.fail_json(msg='the python munch library is required for this module')
- system = get_system(module)
- export = get_export(module, system)
+ system = get_system(module)
+ export = get_export(module, system)
if module.params['state'] == 'present':
update_client(module, export)
diff --git a/lib/ansible/modules/storage/infinidat/infini_fs.py b/lib/ansible/modules/storage/infinidat/infini_fs.py
index 68a1749c97..aeddc2dca0 100644
--- a/lib/ansible/modules/storage/infinidat/infini_fs.py
+++ b/lib/ansible/modules/storage/infinidat/infini_fs.py
@@ -126,10 +126,10 @@ def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- name = dict(required=True),
- state = dict(default='present', choices=['present', 'absent']),
- pool = dict(required=True),
- size = dict()
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ pool=dict(required=True),
+ size=dict()
)
)
@@ -146,9 +146,9 @@ def main():
except:
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
- state      = module.params['state']
- system     = get_system(module)
- pool       = get_pool(module, system)
+ state = module.params['state']
+ system = get_system(module)
+ pool = get_pool(module, system)
filesystem = get_filesystem(module, system)
if pool is None:
diff --git a/lib/ansible/modules/storage/infinidat/infini_host.py b/lib/ansible/modules/storage/infinidat/infini_host.py
index 6389d22992..516f2303f9 100644
--- a/lib/ansible/modules/storage/infinidat/infini_host.py
+++ b/lib/ansible/modules/storage/infinidat/infini_host.py
@@ -81,7 +81,7 @@ from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_syste
@api_wrapper
def get_host(module, system):
- host = None
+ host = None
for h in system.hosts.to_list():
if h.get_name() == module.params['name']:
@@ -124,10 +124,10 @@ def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- name = dict(required=True),
- state = dict(default='present', choices=['present', 'absent']),
- wwns = dict(type='list'),
- volume = dict()
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wwns=dict(type='list'),
+ volume=dict()
)
)
@@ -136,9 +136,9 @@ def main():
if not HAS_INFINISDK:
module.fail_json(msg='infinisdk is required for this module')
- state  = module.params['state']
+ state = module.params['state']
system = get_system(module)
- host   = get_host(module, system)
+ host = get_host(module, system)
if module.params['volume']:
try:
diff --git a/lib/ansible/modules/storage/infinidat/infini_pool.py b/lib/ansible/modules/storage/infinidat/infini_pool.py
index cf90be4d86..da0068a73b 100644
--- a/lib/ansible/modules/storage/infinidat/infini_pool.py
+++ b/lib/ansible/modules/storage/infinidat/infini_pool.py
@@ -102,9 +102,9 @@ def get_pool(module, system):
@api_wrapper
def create_pool(module, system):
"""Create Pool"""
- name      = module.params['name']
- size      = module.params['size']
- vsize     = module.params['vsize']
+ name = module.params['name']
+ size = module.params['size']
+ vsize = module.params['vsize']
ssd_cache = module.params['ssd_cache']
if not module.check_mode:
@@ -126,10 +126,10 @@ def create_pool(module, system):
@api_wrapper
def update_pool(module, system, pool):
"""Update Pool"""
- changed   = False
+ changed = False
- size      = module.params['size']
- vsize     = module.params['vsize']
+ size = module.params['size']
+ vsize = module.params['vsize']
ssd_cache = module.params['ssd_cache']
# Roundup the capacity to mimic Infinibox behaviour
@@ -167,11 +167,11 @@ def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- name = dict(required=True),
- state = dict(default='present', choices=['present', 'absent']),
- size = dict(),
- vsize = dict(),
- ssd_cache = dict(type='bool', default=True)
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ size=dict(),
+ vsize=dict(),
+ ssd_cache=dict(type='bool', default=True)
)
)
@@ -194,9 +194,9 @@ def main():
except:
module.fail_json(msg='vsize (Virtual Capacity) should be defined in MB, GB, TB or PB units')
- state  = module.params['state']
+ state = module.params['state']
system = get_system(module)
- pool   = get_pool(module, system)
+ pool = get_pool(module, system)
if state == 'present' and not pool:
create_pool(module, system)
diff --git a/lib/ansible/modules/storage/infinidat/infini_vol.py b/lib/ansible/modules/storage/infinidat/infini_vol.py
index 729ade6f28..c83a7a6767 100644
--- a/lib/ansible/modules/storage/infinidat/infini_vol.py
+++ b/lib/ansible/modules/storage/infinidat/infini_vol.py
@@ -126,10 +126,10 @@ def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- name = dict(required=True),
- state = dict(default='present', choices=['present', 'absent']),
- pool = dict(required=True),
- size = dict()
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ pool=dict(required=True),
+ size=dict()
)
)
@@ -144,9 +144,9 @@ def main():
except:
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
- state  = module.params['state']
+ state = module.params['state']
system = get_system(module)
- pool   = get_pool(module, system)
+ pool = get_pool(module, system)
volume = get_volume(module, system)
if pool is None:
diff --git a/lib/ansible/modules/storage/netapp/na_cdot_qtree.py b/lib/ansible/modules/storage/netapp/na_cdot_qtree.py
index aa9cb7b381..0119327d8c 100644
--- a/lib/ansible/modules/storage/netapp/na_cdot_qtree.py
+++ b/lib/ansible/modules/storage/netapp/na_cdot_qtree.py
@@ -129,7 +129,7 @@ class NetAppCDOTQTree(object):
qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'qtree-info', **{'vserver': self.vserver,
- 'volume':self.flexvol_name,
+ 'volume': self.flexvol_name,
'qtree': self.name})
query = netapp_utils.zapi.NaElement('query')
diff --git a/lib/ansible/modules/storage/netapp/na_cdot_volume.py b/lib/ansible/modules/storage/netapp/na_cdot_volume.py
index d62159cfa1..a58c024efb 100644
--- a/lib/ansible/modules/storage/netapp/na_cdot_volume.py
+++ b/lib/ansible/modules/storage/netapp/na_cdot_volume.py
@@ -246,7 +246,7 @@ class NetAppCDOTVolume(object):
else:
volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-destroy', **{'name': self.name, 'unmount-and-offline':
- 'true'})
+ 'true'})
try:
self.server.invoke_successfully(volume_delete,
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_amg.py b/lib/ansible/modules/storage/netapp/netapp_e_amg.py
index b5cf67d8cd..476a2b83f9 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_amg.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_amg.py
@@ -168,7 +168,7 @@ def create_async(module, ssid, api_url, api_pwd, api_usr, body):
rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
except Exception as e:
- module.exit_json(msg="Exception while creating aysnc mirror group. Message: %s" % to_native(e),
+ module.exit_json(msg="Exception while creating aysnc mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return data
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py b/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
index 58d3a4723b..994376ecbe 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
@@ -125,7 +125,7 @@ class GroupBy(object):
# python 2, 3 generic grouping.
def __init__(self, iterable, key=None):
if key is None:
- key = lambda x: x
+ def key(x): return x
self.keyfunc = key
self.it = iter(iterable)
self.tgtkey = self.currkey = self.currvalue = object()
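
The netapp_e_storagepool.py hunk above swaps an assigned lambda for a def, the standard remedy for pycodestyle E731 ("do not assign a lambda expression, use a def"); the resulting one-line def relies on E704 ("statement on same line as def") being ignored by pycodestyle's defaults. A small, purely illustrative sketch of the same pattern, with hypothetical names not taken from the module:

    def apply_key(item, key=None):
        if key is None:
            # instead of: key = lambda x: x
            def key(x):
                return x
        return key(item)

    assert apply_key(42) == 42            # identity key when none is given
    assert apply_key('ab', key=len) == 2  # caller-supplied key function
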
diff --git a/lib/ansible/modules/storage/netapp/sf_snapshot_schedule_manager.py b/lib/ansible/modules/storage/netapp/sf_snapshot_schedule_manager.py
index 02298fc895..e609188827 100644
--- a/lib/ansible/modules/storage/netapp/sf_snapshot_schedule_manager.py
+++ b/lib/ansible/modules/storage/netapp/sf_snapshot_schedule_manager.py
@@ -312,11 +312,11 @@ class SolidFireSnapShotSchedule(object):
elif self.state == 'present':
# Check if we need to update the account
- if self.retention is not None and schedule_detail.schedule_info.retention !=self.retention:
+ if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
update_schedule = True
changed = True
- elif schedule_detail.name !=self.name:
+ elif schedule_detail.name != self.name:
update_schedule = True
changed = True
diff --git a/lib/ansible/modules/storage/netapp/sf_volume_access_group_manager.py b/lib/ansible/modules/storage/netapp/sf_volume_access_group_manager.py
index bd6ba5d01d..ef62ebc6dd 100644
--- a/lib/ansible/modules/storage/netapp/sf_volume_access_group_manager.py
+++ b/lib/ansible/modules/storage/netapp/sf_volume_access_group_manager.py
@@ -225,7 +225,7 @@ class SolidFireVolumeAccessGroup(object):
update_group = True
changed = True
elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
- self.attributes is not None:
+ self.attributes is not None:
update_group = True
changed = True
diff --git a/lib/ansible/modules/storage/netapp/sf_volume_manager.py b/lib/ansible/modules/storage/netapp/sf_volume_manager.py
index d95c9a1dc9..15455a55fb 100644
--- a/lib/ansible/modules/storage/netapp/sf_volume_manager.py
+++ b/lib/ansible/modules/storage/netapp/sf_volume_manager.py
@@ -289,7 +289,7 @@ class SolidFireVolume(object):
elif volume_detail.total_size is not None and volume_detail.total_size != self.size:
size_difference = abs(float(volume_detail.total_size - self.size))
# Change size only if difference is bigger than 0.001
- if size_difference/self.size > 0.001:
+ if size_difference / self.size > 0.001:
update_volume = True
changed = True
@@ -312,8 +312,8 @@ class SolidFireVolume(object):
self.create_volume()
result_message = "Volume created"
elif update_volume:
- self.update_volume()
- result_message = "Volume updated"
+ self.update_volume()
+ result_message = "Volume updated"
elif self.state == 'absent':
self.delete_volume()
diff --git a/lib/ansible/modules/system/cron.py b/lib/ansible/modules/system/cron.py
index 39e8fd9681..2e72720dfb 100644
--- a/lib/ansible/modules/system/cron.py
+++ b/lib/ansible/modules/system/cron.py
@@ -217,6 +217,7 @@ class CronTab(object):
user - the user of the crontab (defaults to root)
cron_file - a cron file under /etc/cron.d, or an absolute path
"""
+
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
diff --git a/lib/ansible/modules/web_infrastructure/letsencrypt.py b/lib/ansible/modules/web_infrastructure/letsencrypt.py
index c60832c3a5..de74398abb 100644
--- a/lib/ansible/modules/web_infrastructure/letsencrypt.py
+++ b/lib/ansible/modules/web_infrastructure/letsencrypt.py
@@ -279,6 +279,7 @@ class ACMEDirectory(object):
require authentication).
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.2
'''
+
def __init__(self, module):
self.module = module
self.directory_root = module.params['acme_directory']
@@ -304,6 +305,7 @@ class ACMEAccount(object):
ACME server. Provides access to account bound information like
the currently active authorizations and valid certificates
'''
+
def __init__(self, module):
self.module = module
self.key = module.params['account_key']
@@ -513,6 +515,7 @@ class ACMEClient(object):
start and validate ACME challenges and download the respective
certificates.
'''
+
def __init__(self, module):
self.module = module
self.challenge = module.params['challenge']
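
The cron.py and letsencrypt.py hunks above insert a blank line between a class docstring and the first method, which is the fix pycodestyle's E301 check ("expected 1 blank line, got 0") asks for. A minimal, purely illustrative sketch with a hypothetical class:

    class ExampleClient(object):
        '''
        Hypothetical class, not from the module: one blank line separates
        the docstring from the first method, which keeps E301 quiet.
        '''

        def __init__(self, directory_url):
            self.directory_url = directory_url

    client = ExampleClient('https://acme-staging.example/directory')
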
diff --git a/test/sanity/pep8/legacy-files.txt b/test/sanity/pep8/legacy-files.txt
index 53c281892a..4ab50620fa 100644
--- a/test/sanity/pep8/legacy-files.txt
+++ b/test/sanity/pep8/legacy-files.txt
@@ -1,285 +1,36 @@
-lib/ansible/modules/cloud/amazon/_ec2_ami_find.py
-lib/ansible/modules/cloud/amazon/_ec2_ami_search.py
-lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py
-lib/ansible/modules/cloud/amazon/_ec2_vpc.py
-lib/ansible/modules/cloud/amazon/_ec2_vpc_dhcp_options.py
lib/ansible/modules/cloud/amazon/aws_kms.py
-lib/ansible/modules/cloud/amazon/cloudformation.py
-lib/ansible/modules/cloud/amazon/cloudfront_facts.py
-lib/ansible/modules/cloud/amazon/dynamodb_table.py
-lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
lib/ansible/modules/cloud/amazon/ec2_elb.py
-lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
-lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
-lib/ansible/modules/cloud/amazon/ec2_lc_facts.py
-lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
-lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
-lib/ansible/modules/cloud/amazon/ec2_snapshot.py
-lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
-lib/ansible/modules/cloud/amazon/ec2_tag.py
-lib/ansible/modules/cloud/amazon/ec2_vol.py
-lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py
-lib/ansible/modules/cloud/amazon/ec2_win_password.py
-lib/ansible/modules/cloud/amazon/ecs_cluster.py
-lib/ansible/modules/cloud/amazon/ecs_service.py
-lib/ansible/modules/cloud/amazon/ecs_service_facts.py
lib/ansible/modules/cloud/amazon/ecs_task.py
-lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
-lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py
-lib/ansible/modules/cloud/amazon/elb_classic_lb.py
lib/ansible/modules/cloud/amazon/elb_instance.py
-lib/ansible/modules/cloud/amazon/execute_lambda.py
lib/ansible/modules/cloud/amazon/iam.py
-lib/ansible/modules/cloud/amazon/iam_policy.py
-lib/ansible/modules/cloud/amazon/rds_subnet_group.py
-lib/ansible/modules/cloud/amazon/redshift.py
-lib/ansible/modules/cloud/amazon/route53_health_check.py
-lib/ansible/modules/cloud/amazon/s3_lifecycle.py
-lib/ansible/modules/cloud/amazon/s3_logging.py
-lib/ansible/modules/cloud/amazon/s3_website.py
-lib/ansible/modules/cloud/amazon/sns_topic.py
-lib/ansible/modules/cloud/amazon/sts_assume_role.py
-lib/ansible/modules/cloud/amazon/sts_session_token.py
-lib/ansible/modules/cloud/azure/azure_rm_deployment.py
lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_facts.py
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py
-lib/ansible/modules/cloud/azure/azure_rm_subnet.py
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py
lib/ansible/modules/cloud/centurylink/clc_loadbalancer.py
-lib/ansible/modules/cloud/docker/docker_container.py
-lib/ansible/modules/cloud/docker/docker_image.py
-lib/ansible/modules/cloud/docker/docker_image_facts.py
-lib/ansible/modules/cloud/docker/docker_network.py
-lib/ansible/modules/cloud/google/gc_storage.py
-lib/ansible/modules/cloud/google/gcdns_record.py
-lib/ansible/modules/cloud/google/gcdns_zone.py
-lib/ansible/modules/cloud/lxc/lxc_container.py
-lib/ansible/modules/cloud/lxd/lxd_container.py
-lib/ansible/modules/cloud/misc/rhevm.py
-lib/ansible/modules/cloud/misc/serverless.py
-lib/ansible/modules/cloud/misc/virt_net.py
-lib/ansible/modules/cloud/misc/virt_pool.py
-lib/ansible/modules/cloud/misc/xenserver_facts.py
-lib/ansible/modules/cloud/openstack/_os_server_actions.py
-lib/ansible/modules/cloud/openstack/os_image.py
-lib/ansible/modules/cloud/openstack/os_keypair.py
-lib/ansible/modules/cloud/openstack/os_nova_flavor.py
-lib/ansible/modules/cloud/openstack/os_quota.py
lib/ansible/modules/cloud/openstack/os_security_group_rule.py
lib/ansible/modules/cloud/openstack/os_server.py
-lib/ansible/modules/cloud/openstack/os_stack.py
-lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
-lib/ansible/modules/cloud/packet/packet_sshkey.py
-lib/ansible/modules/cloud/profitbricks/profitbricks.py
-lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
-lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py
-lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py
-lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py
-lib/ansible/modules/cloud/rackspace/rax_cbs_attachments.py
-lib/ansible/modules/cloud/rackspace/rax_cdb.py
-lib/ansible/modules/cloud/rackspace/rax_clb.py
-lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py
-lib/ansible/modules/cloud/rackspace/rax_dns.py
-lib/ansible/modules/cloud/rackspace/rax_dns_record.py
-lib/ansible/modules/cloud/rackspace/rax_facts.py
lib/ansible/modules/cloud/rackspace/rax_files.py
-lib/ansible/modules/cloud/rackspace/rax_keypair.py
-lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py
-lib/ansible/modules/cloud/rackspace/rax_mon_check.py
-lib/ansible/modules/cloud/rackspace/rax_mon_entity.py
-lib/ansible/modules/cloud/rackspace/rax_mon_notification.py
-lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py
-lib/ansible/modules/cloud/univention/udm_dns_record.py
-lib/ansible/modules/cloud/univention/udm_dns_zone.py
-lib/ansible/modules/cloud/univention/udm_group.py
-lib/ansible/modules/cloud/univention/udm_share.py
-lib/ansible/modules/cloud/univention/udm_user.py
lib/ansible/modules/cloud/vmware/vsphere_guest.py
-lib/ansible/modules/cloud/webfaction/webfaction_app.py
-lib/ansible/modules/cloud/webfaction/webfaction_db.py
-lib/ansible/modules/cloud/webfaction/webfaction_domain.py
-lib/ansible/modules/cloud/webfaction/webfaction_site.py
-lib/ansible/modules/database/misc/kibana_plugin.py
-lib/ansible/modules/database/misc/riak.py
-lib/ansible/modules/database/mongodb/mongodb_parameter.py
lib/ansible/modules/database/mongodb/mongodb_user.py
-lib/ansible/modules/database/mssql/mssql_db.py
-lib/ansible/modules/database/postgresql/postgresql_ext.py
-lib/ansible/modules/database/postgresql/postgresql_lang.py
-lib/ansible/modules/database/postgresql/postgresql_schema.py
-lib/ansible/modules/database/vertica/vertica_configuration.py
-lib/ansible/modules/database/vertica/vertica_facts.py
-lib/ansible/modules/database/vertica/vertica_role.py
-lib/ansible/modules/database/vertica/vertica_schema.py
-lib/ansible/modules/database/vertica/vertica_user.py
-lib/ansible/modules/monitoring/bigpanda.py
-lib/ansible/modules/monitoring/datadog_event.py
-lib/ansible/modules/monitoring/icinga2_feature.py
-lib/ansible/modules/monitoring/librato_annotation.py
-lib/ansible/modules/monitoring/logentries.py
-lib/ansible/modules/monitoring/logicmonitor.py
-lib/ansible/modules/monitoring/nagios.py
-lib/ansible/modules/monitoring/newrelic_deployment.py
-lib/ansible/modules/monitoring/pagerduty.py
-lib/ansible/modules/monitoring/pingdom.py
-lib/ansible/modules/monitoring/sensu_check.py
-lib/ansible/modules/monitoring/sensu_subscription.py
-lib/ansible/modules/monitoring/stackdriver.py
-lib/ansible/modules/monitoring/uptimerobot.py
-lib/ansible/modules/net_tools/cloudflare_dns.py
-lib/ansible/modules/net_tools/dnsimple.py
lib/ansible/modules/net_tools/dnsmadeeasy.py
-lib/ansible/modules/net_tools/ipinfoio_facts.py
-lib/ansible/modules/net_tools/nmcli.py
-lib/ansible/modules/net_tools/omapi_host.py
lib/ansible/modules/net_tools/snmp_facts.py
-lib/ansible/modules/network/a10/a10_server_axapi3.py
-lib/ansible/modules/network/a10/a10_virtual_server.py
-lib/ansible/modules/network/aos/aos_asn_pool.py
lib/ansible/modules/network/aos/aos_blueprint.py
-lib/ansible/modules/network/aos/aos_blueprint_param.py
-lib/ansible/modules/network/aos/aos_blueprint_virtnet.py
lib/ansible/modules/network/aos/aos_device.py
lib/ansible/modules/network/aos/aos_external_router.py
lib/ansible/modules/network/aos/aos_ip_pool.py
-lib/ansible/modules/network/aos/aos_logical_device.py
lib/ansible/modules/network/aos/aos_logical_device_map.py
+lib/ansible/modules/network/aos/aos_logical_device.py
lib/ansible/modules/network/aos/aos_rack_type.py
lib/ansible/modules/network/aos/aos_template.py
-lib/ansible/modules/network/asa/asa_acl.py
-lib/ansible/modules/network/asa/asa_command.py
-lib/ansible/modules/network/asa/asa_config.py
-lib/ansible/modules/network/bigswitch/bigmon_chain.py
-lib/ansible/modules/network/bigswitch/bigmon_policy.py
-lib/ansible/modules/network/cumulus/_cl_bond.py
-lib/ansible/modules/network/cumulus/_cl_img_install.py
-lib/ansible/modules/network/cumulus/_cl_license.py
-lib/ansible/modules/network/cumulus/_cl_ports.py
-lib/ansible/modules/network/cumulus/nclu.py
-lib/ansible/modules/network/eos/eos_banner.py
-lib/ansible/modules/network/eos/eos_command.py
-lib/ansible/modules/network/eos/eos_config.py
lib/ansible/modules/network/eos/eos_eapi.py
-lib/ansible/modules/network/eos/eos_facts.py
lib/ansible/modules/network/eos/eos_system.py
lib/ansible/modules/network/eos/eos_user.py
-lib/ansible/modules/network/fortios/fortios_config.py
-lib/ansible/modules/network/fortios/fortios_ipv4_policy.py
-lib/ansible/modules/network/illumos/dladm_iptun.py
-lib/ansible/modules/network/illumos/dladm_linkprop.py
-lib/ansible/modules/network/ios/ios_banner.py
-lib/ansible/modules/network/ios/ios_command.py
-lib/ansible/modules/network/ios/ios_facts.py
lib/ansible/modules/network/ios/ios_system.py
-lib/ansible/modules/network/ios/ios_vrf.py
-lib/ansible/modules/network/netvisor/pn_cluster.py
-lib/ansible/modules/network/netvisor/pn_ospfarea.py
-lib/ansible/modules/network/netvisor/pn_vlag.py
-lib/ansible/modules/network/netvisor/pn_vlan.py
-lib/ansible/modules/network/netvisor/pn_vrouter.py
-lib/ansible/modules/network/netvisor/pn_vrouterbgp.py
-lib/ansible/modules/network/netvisor/pn_vrouterif.py
-lib/ansible/modules/network/netvisor/pn_vrouterlbif.py
-lib/ansible/modules/network/nxos/_nxos_mtu.py
-lib/ansible/modules/network/nxos/nxos_aaa_server_host.py
-lib/ansible/modules/network/nxos/nxos_config.py
-lib/ansible/modules/network/nxos/nxos_gir.py
-lib/ansible/modules/network/nxos/nxos_gir_profile_management.py
-lib/ansible/modules/network/nxos/nxos_igmp.py
-lib/ansible/modules/network/nxos/nxos_igmp_interface.py
-lib/ansible/modules/network/nxos/nxos_igmp_snooping.py
-lib/ansible/modules/network/nxos/nxos_ntp_auth.py
-lib/ansible/modules/network/nxos/nxos_ntp_options.py
lib/ansible/modules/network/nxos/nxos_nxapi.py
-lib/ansible/modules/network/nxos/nxos_overlay_global.py
-lib/ansible/modules/network/nxos/nxos_ping.py
-lib/ansible/modules/network/nxos/nxos_smu.py
-lib/ansible/modules/network/nxos/nxos_snapshot.py
-lib/ansible/modules/network/nxos/nxos_static_route.py
lib/ansible/modules/network/nxos/nxos_system.py
-lib/ansible/modules/network/nxos/nxos_udld.py
-lib/ansible/modules/network/nxos/nxos_udld_interface.py
lib/ansible/modules/network/nxos/nxos_user.py
-lib/ansible/modules/network/nxos/nxos_vrf.py
-lib/ansible/modules/network/nxos/nxos_vtp_domain.py
-lib/ansible/modules/network/nxos/nxos_vtp_password.py
-lib/ansible/modules/network/nxos/nxos_vtp_version.py
-lib/ansible/modules/network/ordnance/ordnance_config.py
-lib/ansible/modules/network/ordnance/ordnance_facts.py
lib/ansible/modules/network/ovs/openvswitch_bridge.py
lib/ansible/modules/network/ovs/openvswitch_port.py
-lib/ansible/modules/network/panos/panos_admin.py
-lib/ansible/modules/network/panos/panos_admpwd.py
-lib/ansible/modules/network/panos/panos_cert_gen_ssh.py
-lib/ansible/modules/network/panos/panos_check.py
-lib/ansible/modules/network/panos/panos_dag.py
-lib/ansible/modules/network/panos/panos_import.py
-lib/ansible/modules/network/panos/panos_interface.py
-lib/ansible/modules/network/panos/panos_loadcfg.py
-lib/ansible/modules/network/panos/panos_mgtconfig.py
-lib/ansible/modules/network/panos/panos_pg.py
-lib/ansible/modules/network/sros/sros_command.py
-lib/ansible/modules/network/sros/sros_config.py
lib/ansible/modules/network/sros/sros_rollback.py
-lib/ansible/modules/network/vyos/vyos_command.py
-lib/ansible/modules/network/vyos/vyos_system.py
-lib/ansible/modules/notification/campfire.py
-lib/ansible/modules/notification/flowdock.py
-lib/ansible/modules/notification/grove.py
-lib/ansible/modules/notification/hall.py
-lib/ansible/modules/notification/irc.py
-lib/ansible/modules/notification/jabber.py
-lib/ansible/modules/notification/mattermost.py
-lib/ansible/modules/notification/mqtt.py
-lib/ansible/modules/notification/osx_say.py
-lib/ansible/modules/notification/pushbullet.py
-lib/ansible/modules/notification/pushover.py
-lib/ansible/modules/notification/rocketchat.py
-lib/ansible/modules/notification/sendgrid.py
-lib/ansible/modules/notification/slack.py
-lib/ansible/modules/notification/twilio.py
-lib/ansible/modules/packaging/language/bundler.py
-lib/ansible/modules/packaging/language/cpanm.py
-lib/ansible/modules/packaging/language/gem.py
lib/ansible/modules/packaging/language/maven_artifact.py
-lib/ansible/modules/packaging/language/pear.py
-lib/ansible/modules/packaging/os/apk.py
-lib/ansible/modules/packaging/os/dpkg_selections.py
-lib/ansible/modules/packaging/os/homebrew.py
lib/ansible/modules/packaging/os/homebrew_cask.py
-lib/ansible/modules/packaging/os/layman.py
-lib/ansible/modules/packaging/os/macports.py
-lib/ansible/modules/packaging/os/opkg.py
-lib/ansible/modules/packaging/os/pkgin.py
-lib/ansible/modules/packaging/os/pkgng.py
-lib/ansible/modules/packaging/os/pkgutil.py
-lib/ansible/modules/packaging/os/portage.py
-lib/ansible/modules/packaging/os/portinstall.py
-lib/ansible/modules/packaging/os/sorcery.py
-lib/ansible/modules/packaging/os/svr4pkg.py
-lib/ansible/modules/packaging/os/swdepot.py
-lib/ansible/modules/packaging/os/zypper.py
-lib/ansible/modules/packaging/os/zypper_repository.py
-lib/ansible/modules/storage/infinidat/infini_export.py
-lib/ansible/modules/storage/infinidat/infini_export_client.py
-lib/ansible/modules/storage/infinidat/infini_fs.py
-lib/ansible/modules/storage/infinidat/infini_host.py
-lib/ansible/modules/storage/infinidat/infini_pool.py
-lib/ansible/modules/storage/infinidat/infini_vol.py
-lib/ansible/modules/storage/netapp/na_cdot_qtree.py
-lib/ansible/modules/storage/netapp/na_cdot_volume.py
-lib/ansible/modules/storage/netapp/netapp_e_amg.py
+lib/ansible/modules/packaging/os/homebrew.py
lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
-lib/ansible/modules/storage/netapp/sf_snapshot_schedule_manager.py
-lib/ansible/modules/storage/netapp/sf_volume_access_group_manager.py
-lib/ansible/modules/storage/netapp/sf_volume_manager.py